diff --git a/.gitignore b/.gitignore index 829f19a7427b..db9598932e75 100644 --- a/.gitignore +++ b/.gitignore @@ -159,7 +159,6 @@ _darcs/ /mk/install.mk /mk/project.mk /mk/project.mk.old -/mk/stamp-h /mk/validate.mk /rts/package.conf.inplace /rts/package.conf.inplace.raw diff --git a/aclocal.m4 b/aclocal.m4 index 4b84e027a0d1..af492dfac9e1 100644 --- a/aclocal.m4 +++ b/aclocal.m4 @@ -535,18 +535,6 @@ AC_DEFUN([FPTOOLS_SET_C_LD_FLAGS], $2="$$2 -fno-stack-protector" fi - # Reduce memory usage when linking. See trac #5240. - if test -n "$LdHashSize31" - then - $3="$$3 -Wl,$LdHashSize31" - $4="$$4 $LdHashSize31" - fi - if test -n "$LdReduceMemoryOverheads" - then - $3="$$3 -Wl,$LdReduceMemoryOverheads" - $4="$$4 $LdReduceMemoryOverheads" - fi - rm -f conftest.c conftest.o AC_MSG_RESULT([done]) ]) @@ -693,8 +681,6 @@ AC_ARG_WITH($2, # Figure out how to do context diffs. Sets the output variable ContextDiffCmd. # # Note: NeXTStep thinks diff'ing a file against itself is "trouble". -# -# Used by ghc, glafp-utils/ltx, and glafp-utils/runstdtest. AC_DEFUN([FP_PROG_CONTEXT_DIFF], [AC_CACHE_CHECK([for a working context diff], [fp_cv_context_diff], [echo foo > conftest1 @@ -923,27 +909,6 @@ $2=$fp_cv_$2 ])# FP_PROG_LD_FLAG -# FP_PROG_LD_HashSize31 -# ------------ -# Sets the output variable LdHashSize31 to --hash-size=31 if ld supports -# this flag. Otherwise the variable's value is empty. -AC_DEFUN([FP_PROG_LD_HashSize31], -[ -FP_PROG_LD_FLAG([--hash-size=31],[LdHashSize31]) -])# FP_PROG_LD_HashSize31 - - -# FP_PROG_LD_ReduceMemoryOverheads -# ------------ -# Sets the output variable LdReduceMemoryOverheads to -# --reduce-memory-overheads if ld supports this flag. -# Otherwise the variable's value is empty. -AC_DEFUN([FP_PROG_LD_ReduceMemoryOverheads], -[ -FP_PROG_LD_FLAG([--reduce-memory-overheads],[LdReduceMemoryOverheads]) -])# FP_PROG_LD_ReduceMemoryOverheads - - # FP_PROG_LD_BUILD_ID # ------------ @@ -1553,6 +1518,15 @@ if test "$RELEASE" = "NO"; then AC_MSG_RESULT(given $PACKAGE_VERSION) else AC_MSG_WARN([cannot determine snapshot version: no .git directory and no VERSION file]) + dnl We'd really rather this case didn't happen, but it might + dnl do (in particular, people using lndir trees may find that + dnl the build system can't find any other date). If it does + dnl happen, then we use the current date. + dnl This way we get some idea about how recent a build is. + dnl It also means that packages built for 2 different builds + dnl will probably use different version numbers, so things are + dnl less likely to go wrong. 
+ PACKAGE_VERSION=${PACKAGE_VERSION}.`date +%Y%m%d` fi fi @@ -2015,7 +1989,7 @@ AC_DEFUN([FIND_LLVM_PROG],[ for p in ${PATH}; do if test -d "${p}"; then $1=`${FindCmd} "${p}" -type f -perm +111 -maxdepth 1 -regex '.*/$3-[[0-9]]\.[[0-9]]' -or -type l -perm +111 -maxdepth 1 -regex '.*/$3-[[0-9]]\.[[0-9]]' | ${SortCmd} -n | tail -1` - if test -n "$1"; then + if test -n "$$1"; then break fi fi diff --git a/compiler/basicTypes/DataCon.lhs b/compiler/basicTypes/DataCon.lhs index a15b7341d6d6..42032d49a84b 100644 --- a/compiler/basicTypes/DataCon.lhs +++ b/compiler/basicTypes/DataCon.lhs @@ -888,9 +888,9 @@ dataConInstArgTys :: DataCon -- ^ A datacon with no existentials or equality con -> [Type] dataConInstArgTys dc@(MkData {dcUnivTyVars = univ_tvs, dcEqSpec = eq_spec, dcExTyVars = ex_tvs}) inst_tys - = ASSERT2 ( length univ_tvs == length inst_tys - , ptext (sLit "dataConInstArgTys") <+> ppr dc $$ ppr univ_tvs $$ ppr inst_tys) - ASSERT2 ( null ex_tvs && null eq_spec, ppr dc ) + = ASSERT2( length univ_tvs == length inst_tys + , ptext (sLit "dataConInstArgTys") <+> ppr dc $$ ppr univ_tvs $$ ppr inst_tys) + ASSERT2( null ex_tvs && null eq_spec, ppr dc ) map (substTyWith univ_tvs inst_tys) (dataConRepArgTys dc) -- | Returns just the instantiated /value/ argument types of a 'DataCon', diff --git a/compiler/basicTypes/Demand.lhs b/compiler/basicTypes/Demand.lhs index 25f3091fcf82..3e8096a27241 100644 --- a/compiler/basicTypes/Demand.lhs +++ b/compiler/basicTypes/Demand.lhs @@ -7,15 +7,18 @@ \begin{code} module Demand ( - StrDmd, strBot, strTop, strStr, strProd, strCall, - AbsDmd, absBot, absTop, absProd, + StrDmd, UseDmd(..), Count(..), + countOnce, countMany, -- cardinality - Demand, JointDmd, mkProdDmd, - absDmd, topDmd, botDmd, + Demand, CleanDemand, + mkProdDmd, mkOnceUsedDmd, mkManyUsedDmd, mkHeadStrict, oneifyDmd, + getUsage, toCleanDmd, + absDmd, topDmd, botDmd, seqDmd, lubDmd, bothDmd, isTopDmd, isBotDmd, isAbsDmd, isSeqDmd, + peelUseCall, cleanUseDmd_maybe, strictenDmd, bothCleanDmd, - DmdType(..), dmdTypeDepth, lubDmdType, bothDmdType, + DmdType(..), dmdTypeDepth, lubDmdType, bothDmdEnv, bothDmdType, topDmdType, botDmdType, mkDmdType, mkTopDmdType, DmdEnv, emptyDmdEnv, @@ -28,28 +31,32 @@ module Demand ( StrictSig(..), mkStrictSig, topSig, botSig, cprProdSig, isTopSig, splitStrictSig, increaseStrictSigArity, - seqStrDmd, seqStrDmdList, seqAbsDmd, seqAbsDmdList, seqDemand, seqDemandList, seqDmdType, seqStrictSig, - evalDmd, vanillaCall, isStrictDmd, splitCallDmd, splitDmdTy, - someCompUsed, isUsed, isUsedDmd, - defer, deferType, deferEnv, modifyEnv, - isProdDmd, splitProdDmd, splitProdDmd_maybe, peelCallDmd, mkCallDmd, - dmdTransformSig, dmdTransformDataConSig, + evalDmd, cleanEvalDmd, cleanEvalProdDmd, isStrictDmd, + splitDmdTy, splitFVs, + deferDmd, deferType, deferAndUse, deferEnv, modifyEnv, + + splitProdDmd, splitProdDmd_maybe, peelCallDmd, mkCallDmd, + dmdTransformSig, dmdTransformDataConSig, argOneShots, argsOneShots, + + isSingleUsed, useType, useEnv, zapDemand, zapStrictSig, worthSplittingFun, worthSplittingThunk + ) where #include "HsVersions.h" import StaticFlags +import DynFlags import Outputable import VarEnv import UniqFM import Util import BasicTypes import Binary -import Maybes ( isJust, expectJust ) +import Maybes ( isJust, expectJust ) \end{code} %************************************************************************ @@ -60,13 +67,14 @@ import Maybes ( isJust, expectJust ) Lazy | - Str + HeadStr / \ SCall SProd \ / HyperStr \begin{code} + -- Vanilla strictness domain data 
StrDmd = HyperStr -- Hyper-strict @@ -75,99 +83,115 @@ data StrDmd | SCall StrDmd -- Call demand -- Used only for values of function type - | SProd [StrDmd] -- Product + | SProd [MaybeStr] -- Product -- Used only for values of product type -- Invariant: not all components are HyperStr (use HyperStr) - -- not all components are Lazy (use Str) + -- not all components are Lazy (use HeadStr) - | Str -- Head-Strict + | HeadStr -- Head-Strict -- A polymorphic demand: used for values of all types, -- including a type variable - | Lazy -- Lazy - -- Top of the lattice + deriving ( Eq, Show ) + +data MaybeStr = Lazy -- Lazy + -- Top of the lattice + | Str StrDmd deriving ( Eq, Show ) -- Well-formedness preserving constructors for the Strictness domain -strBot, strTop, strStr :: StrDmd -strBot = HyperStr -strTop = Lazy -strStr = Str - -strCall :: StrDmd -> StrDmd -strCall Lazy = Lazy -strCall HyperStr = HyperStr -strCall s = SCall s - -strProd :: [StrDmd] -> StrDmd -strProd sx - | any (== HyperStr) sx = strBot - | all (== Lazy) sx = strStr - | otherwise = SProd sx +strBot, strTop :: MaybeStr +strBot = Str HyperStr +strTop = Lazy + +mkSCall :: StrDmd -> StrDmd +mkSCall HyperStr = HyperStr +mkSCall s = SCall s + +mkSProd :: [MaybeStr] -> StrDmd +mkSProd sx + | any isHyperStr sx = HyperStr + | all isLazy sx = HeadStr + | otherwise = SProd sx + +isLazy :: MaybeStr -> Bool +isLazy Lazy = True +isLazy (Str _) = False + +isHyperStr :: MaybeStr -> Bool +isHyperStr (Str HyperStr) = True +isHyperStr _ = False -- Pretty-printing instance Outputable StrDmd where ppr HyperStr = char 'B' - ppr Lazy = char 'L' ppr (SCall s) = char 'C' <> parens (ppr s) - ppr Str = char 'S' + ppr HeadStr = char 'S' ppr (SProd sx) = char 'S' <> parens (hcat (map ppr sx)) +instance Outputable MaybeStr where + ppr (Str s) = ppr s + ppr Lazy = char 'L' + +lubMaybeStr :: MaybeStr -> MaybeStr -> MaybeStr +lubMaybeStr Lazy _ = Lazy +lubMaybeStr _ Lazy = Lazy +lubMaybeStr (Str s1) (Str s2) = Str (s1 `lubStr` s2) + lubStr :: StrDmd -> StrDmd -> StrDmd lubStr HyperStr s = s lubStr (SCall s1) HyperStr = SCall s1 -lubStr (SCall _) Lazy = Lazy -lubStr (SCall _) Str = Str +lubStr (SCall _) HeadStr = HeadStr lubStr (SCall s1) (SCall s2) = SCall (s1 `lubStr` s2) -lubStr (SCall _) (SProd _) = Str -lubStr (SProd _) HyperStr = HyperStr -lubStr (SProd _) Lazy = Lazy -lubStr (SProd _) Str = Str +lubStr (SCall _) (SProd _) = HeadStr +lubStr (SProd sx) HyperStr = SProd sx +lubStr (SProd _) HeadStr = HeadStr lubStr (SProd s1) (SProd s2) - | length s1 == length s2 = SProd (zipWith lubStr s1 s2) - | otherwise = Str -lubStr (SProd _) (SCall _) = Str -lubStr Str Lazy = Lazy -lubStr Str _ = Str -lubStr Lazy _ = Lazy + | length s1 == length s2 = mkSProd (zipWith lubMaybeStr s1 s2) + | otherwise = HeadStr +lubStr (SProd _) (SCall _) = HeadStr +lubStr HeadStr _ = HeadStr + +bothMaybeStr :: MaybeStr -> MaybeStr -> MaybeStr +bothMaybeStr Lazy s = s +bothMaybeStr s Lazy = s +bothMaybeStr (Str s1) (Str s2) = Str (s1 `bothStr` s2) bothStr :: StrDmd -> StrDmd -> StrDmd bothStr HyperStr _ = HyperStr -bothStr Lazy s = s -bothStr Str Lazy = Str -bothStr Str s = s +bothStr HeadStr s = s bothStr (SCall _) HyperStr = HyperStr -bothStr (SCall s1) Lazy = SCall s1 -bothStr (SCall s1) Str = SCall s1 +bothStr (SCall s1) HeadStr = SCall s1 bothStr (SCall s1) (SCall s2) = SCall (s1 `bothStr` s2) bothStr (SCall _) (SProd _) = HyperStr -- Weird bothStr (SProd _) HyperStr = HyperStr -bothStr (SProd s1) Lazy = SProd s1 -bothStr (SProd s1) Str = SProd s1 +bothStr (SProd s1) HeadStr = 
SProd s1 bothStr (SProd s1) (SProd s2) - | length s1 == length s2 = SProd (zipWith bothStr s1 s2) + | length s1 == length s2 = mkSProd (zipWith bothMaybeStr s1 s2) | otherwise = HyperStr -- Weird bothStr (SProd _) (SCall _) = HyperStr - -- utility functions to deal with memory leaks seqStrDmd :: StrDmd -> () seqStrDmd (SProd ds) = seqStrDmdList ds seqStrDmd (SCall s) = s `seq` () seqStrDmd _ = () -seqStrDmdList :: [StrDmd] -> () +seqStrDmdList :: [MaybeStr] -> () seqStrDmdList [] = () -seqStrDmdList (d:ds) = seqStrDmd d `seq` seqStrDmdList ds +seqStrDmdList (d:ds) = seqMaybeStr d `seq` seqStrDmdList ds + +seqMaybeStr :: MaybeStr -> () +seqMaybeStr Lazy = () +seqMaybeStr (Str s) = seqStrDmd s -- Splitting polymorphic demands -splitStrProdDmd :: Int -> StrDmd -> [StrDmd] -splitStrProdDmd n Lazy = replicate n Lazy -splitStrProdDmd n HyperStr = replicate n HyperStr -splitStrProdDmd n Str = replicate n Lazy +splitStrProdDmd :: Int -> StrDmd -> [MaybeStr] +splitStrProdDmd n HyperStr = replicate n strBot +splitStrProdDmd n HeadStr = replicate n strTop splitStrProdDmd n (SProd ds) = ASSERT( ds `lengthIs` n) ds -splitStrProdDmd n (SCall d) = ASSERT( n == 1 ) [d] +splitStrProdDmd _ d@(SCall {}) = pprPanic "attempt to prod-split strictness call demand" (ppr d) \end{code} %************************************************************************ @@ -176,30 +200,6 @@ splitStrProdDmd n (SCall d) = ASSERT( n == 1 ) [d] %* * %************************************************************************ -Note [Don't optimise UProd(Used) to Used] -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -These two AbsDmds: - UProd [Used, Used] and Used -are semantically equivalent, but we do not turn the former into -the latter, for a regrettable-subtle reason. Suppose we did. -then - f (x,y) = (y,x) -would get - StrDmd = Str = SProd [Lazy, Lazy] - AbsDmd = Used = UProd [Used, Used] -But with the joint demand of doesn't convey any clue -that there is a product involved, and so the worthSplittingFun -will not fire. (We'd need to use the type as well to make it fire.) -Moreover, consider - g h p@(_,_) = h p -This too would get , but this time there really isn't any -point in w/w since the components of the pair are not used at all. - -So the solution is: don't collapse UProd [Used,Used] to Used; intead -leave it as-is. In effect we are using the AbsDmd to do a little bit -of boxity analysis. Not very nice. - - Used / \ UCall UProd @@ -209,93 +209,228 @@ of boxity analysis. Not very nice. Abs \begin{code} -data AbsDmd - = Abs -- Definitely unused - -- Bottom of the lattice - | UHead -- May be used; but its sub-components are - -- definitely *not* used. Roughly U(AAA) - -- Eg the usage of x in x `seq` e - -- A polymorphic demand: used for values of all types, - -- including a type variable - - | UCall AbsDmd -- Call demand for absence +-- Domain for genuine usage +data UseDmd + = UCall Count UseDmd -- Call demand for absence -- Used only for values of function type - | UProd [AbsDmd] -- Product + | UProd [MaybeUsed] -- Product -- Used only for values of product type -- See Note [Don't optimise UProd(Used) to Used] -- [Invariant] Not all components are Abs -- (in that case, use UHead) + | UHead -- May be used; but its sub-components are + -- definitely *not* used. 
Roughly U(AAA) + -- Eg the usage of x in x `seq` e + -- A polymorphic demand: used for values of all types, + -- including a type variable + -- Since (UCall _ Abs) is ill-typed, UHead doesn't + -- make sense for lambdas + | Used -- May be used; and its sub-components may be used -- Top of the lattice deriving ( Eq, Show ) +-- Extended usage demand for absence and counting +data MaybeUsed + = Abs -- Definitely unused + -- Bottom of the lattice + + | Use Count UseDmd -- May be used with some cardinality + deriving ( Eq, Show ) + +-- Abstract counting of usages +data Count = One | Many + deriving ( Eq, Show ) -- Pretty-printing -instance Outputable AbsDmd where - ppr Abs = char 'A' - ppr Used = char 'U' - ppr (UCall a) = char 'C' <> parens (ppr a) - ppr UHead = char 'H' - ppr (UProd as) = (char 'U') <> parens (hcat (map ppr as)) +instance Outputable MaybeUsed where + ppr Abs = char 'A' + ppr (Use Many a) = ppr a + ppr (Use One a) = char '1' <> char '*' <> ppr a + +instance Outputable UseDmd where + ppr Used = char 'U' + ppr (UCall c a) = char 'C' <> ppr c <> parens (ppr a) + ppr UHead = char 'H' + ppr (UProd as) = char 'U' <> parens (hcat (punctuate (char ',') (map ppr as))) + +instance Outputable Count where + ppr One = char '1' + ppr Many = text "" -- Well-formedness preserving constructors for the Absence domain -absBot, absTop, absHead :: AbsDmd -absBot = Abs -absHead = UHead -absTop = Used - -absCall :: AbsDmd -> AbsDmd -absCall Used = Used -absCall Abs = Abs -absCall a = UCall a - -absProd :: [AbsDmd] -> AbsDmd -absProd ux --- | all (== Used) ux = Used +countOnce, countMany :: Count +countOnce = One +countMany = Many + +useBot, useTop :: MaybeUsed +useBot = Abs +useTop = Use Many Used + +mkUCall :: Count -> UseDmd -> UseDmd +--mkUCall c Used = Used c +mkUCall c a = UCall c a + +mkUProd :: [MaybeUsed] -> UseDmd +mkUProd ux | all (== Abs) ux = UHead | otherwise = UProd ux -lubAbs :: AbsDmd -> AbsDmd -> AbsDmd -lubAbs Abs x = x -lubAbs UHead Abs = UHead -lubAbs UHead x = x -lubAbs (UCall u1) Abs = UCall u1 -lubAbs (UCall u1) UHead = UCall u1 -lubAbs (UCall u1) (UCall u2) = UCall (u1 `lubAbs` u2) -lubAbs (UCall _) _ = Used -lubAbs (UProd u1) Abs = UProd u1 -lubAbs (UProd u1) UHead = UProd u1 -lubAbs (UProd u1) (UProd u2) - | length u1 == length u2 = UProd (zipWith lubAbs u1 u2) - | otherwise = Used -lubAbs (UProd _) (UCall _) = Used -lubAbs (UProd ds) Used = UProd (map (`lubAbs` Used) ds) -- Note [Don't optimise UProd(Used) to Used] -lubAbs Used (UProd ds) = UProd (map (`lubAbs` Used) ds) -- Note [Don't optimise UProd(Used) to Used] -lubAbs Used _ = Used - -bothAbs :: AbsDmd -> AbsDmd -> AbsDmd -bothAbs = lubAbs - --- utility functions -seqAbsDmd :: AbsDmd -> () -seqAbsDmd (UProd ds) = seqAbsDmdList ds -seqAbsDmd (UCall d) = seqAbsDmd d -seqAbsDmd _ = () - -seqAbsDmdList :: [AbsDmd] -> () -seqAbsDmdList [] = () -seqAbsDmdList (d:ds) = seqAbsDmd d `seq` seqAbsDmdList ds +lubCount :: Count -> Count -> Count +lubCount _ Many = Many +lubCount Many _ = Many +lubCount x _ = x + +lubMaybeUsed :: MaybeUsed -> MaybeUsed -> MaybeUsed +lubMaybeUsed Abs x = x +lubMaybeUsed x Abs = x +lubMaybeUsed (Use c1 a1) (Use c2 a2) = Use (lubCount c1 c2) (lubUse a1 a2) + +lubUse :: UseDmd -> UseDmd -> UseDmd +lubUse UHead u = u +lubUse (UCall c u) UHead = UCall c u +lubUse (UCall c1 u1) (UCall c2 u2) = UCall (lubCount c1 c2) (lubUse u1 u2) +lubUse (UCall _ _) _ = Used +lubUse (UProd ux) UHead = UProd ux +lubUse (UProd ux1) (UProd ux2) + | length ux1 == length ux2 = UProd $ zipWith lubMaybeUsed ux1 ux2 + | 
otherwise = Used +lubUse (UProd {}) (UCall {}) = Used +-- lubUse (UProd {}) Used = Used +lubUse (UProd ux) Used = UProd (map (`lubMaybeUsed` useTop) ux) +lubUse Used (UProd ux) = UProd (map (`lubMaybeUsed` useTop) ux) +lubUse Used _ = Used -- Note [Used should win] + +-- `both` is different from `lub` in its treatment of counting; if +-- `both` is computed for two used, the result always has +-- cardinality `Many` (except for the inner demands of UCall demand -- [TODO] explain). +-- Also, x `bothUse` x /= x (for anything but Abs). + +bothMaybeUsed :: MaybeUsed -> MaybeUsed -> MaybeUsed +bothMaybeUsed Abs x = x +bothMaybeUsed x Abs = x +bothMaybeUsed (Use _ a1) (Use _ a2) = Use Many (bothUse a1 a2) + + +bothUse :: UseDmd -> UseDmd -> UseDmd +bothUse UHead u = u +bothUse (UCall c u) UHead = UCall c u + +-- Exciting special treatment of inner demand for call demands: +-- use `lubUse` instead of `bothUse`! +bothUse (UCall _ u1) (UCall _ u2) = UCall Many (u1 `lubUse` u2) + +bothUse (UCall {}) _ = Used +bothUse (UProd ux) UHead = UProd ux +bothUse (UProd ux1) (UProd ux2) + | length ux1 == length ux2 = UProd $ zipWith bothMaybeUsed ux1 ux2 + | otherwise = Used +bothUse (UProd {}) (UCall {}) = Used +-- bothUse (UProd {}) Used = Used -- Note [Used should win] +bothUse Used (UProd ux) = UProd (map (`bothMaybeUsed` useTop) ux) +bothUse (UProd ux) Used = UProd (map (`bothMaybeUsed` useTop) ux) +bothUse Used _ = Used -- Note [Used should win] + +peelUseCall :: UseDmd -> Maybe (Count, UseDmd) +peelUseCall (UCall c u) = Just (c,u) +peelUseCall _ = Nothing +\end{code} --- Splitting polymorphic demands -splitAbsProdDmd :: Int -> AbsDmd -> [AbsDmd] -splitAbsProdDmd n Abs = replicate n Abs -splitAbsProdDmd n Used = replicate n Used -splitAbsProdDmd n UHead = replicate n Abs -splitAbsProdDmd n (UProd ds) = ASSERT( ds `lengthIs` n ) ds -splitAbsProdDmd n (UCall d) = ASSERT( n == 1 ) [d] +Note [Don't optimise UProd(Used) to Used] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +These two UseDmds: + UProd [Used, Used] and Used +are semantically equivalent, but we do not turn the former into +the latter, for a regrettable-subtle reason. Suppose we did. +then + f (x,y) = (y,x) +would get + StrDmd = Str = SProd [Lazy, Lazy] + UseDmd = Used = UProd [Used, Used] +But with the joint demand of doesn't convey any clue +that there is a product involved, and so the worthSplittingFun +will not fire. (We'd need to use the type as well to make it fire.) +Moreover, consider + g h p@(_,_) = h p +This too would get , but this time there really isn't any +point in w/w since the components of the pair are not used at all. + +So the solution is: don't aggressively collapse UProd [Used,Used] to +Used; intead leave it as-is. In effect we are using the UseDmd to do a +little bit of boxity analysis. Not very nice. + +Note [Used should win] +~~~~~~~~~~~~~~~~~~~~~~ +Both in lubUse and bothUse we want (Used `both` UProd us) to be Used. +Why? Because Used carries the implication the whole thing is used, +box and all, so we don't want to w/w it. If we use it both boxed and +unboxed, then we are definitely using the box, and so we are quite +likely to pay a reboxing cost. So we make Used win here. 
+ +Example is in the Buffer argument of GHC.IO.Handle.Internals.writeCharBuffer + +Baseline: (A) Not making Used win (UProd wins) +Compare with: (B) making Used win for lub and both + + Min -0.3% -5.6% -10.7% -11.0% -33.3% + Max +0.3% +45.6% +11.5% +11.5% +6.9% + Geometric Mean -0.0% +0.5% +0.3% +0.2% -0.8% + +Baseline: (B) Making Used win for both lub and both +Compare with: (C) making Used win for both, but UProd win for lub + + Min -0.1% -0.3% -7.9% -8.0% -6.5% + Max +0.1% +1.0% +21.0% +21.0% +0.5% + Geometric Mean +0.0% +0.0% -0.0% -0.1% -0.1% + + +\begin{code} +markAsUsedDmd :: MaybeUsed -> MaybeUsed +markAsUsedDmd Abs = Abs +markAsUsedDmd (Use _ a) = Use Many (markUsed a) + +markUsed :: UseDmd -> UseDmd +markUsed (UCall _ u) = UCall Many u -- No need to recurse here +markUsed (UProd ux) = UProd (map markAsUsedDmd ux) +markUsed u = u + +isUsedMU :: MaybeUsed -> Bool +-- True <=> markAsUsedDmd d = d +isUsedMU Abs = True +isUsedMU (Use One _) = False +isUsedMU (Use Many u) = isUsedU u + +isUsedU :: UseDmd -> Bool +-- True <=> markUsed d = d +isUsedU Used = True +isUsedU UHead = True +isUsedU (UProd us) = all isUsedMU us +isUsedU (UCall One _) = False +isUsedU (UCall Many _) = True -- No need to recurse + +-- Squashing usage demand demands +seqUseDmd :: UseDmd -> () +seqUseDmd (UProd ds) = seqMaybeUsedList ds +seqUseDmd (UCall c d) = c `seq` seqUseDmd d +seqUseDmd _ = () + +seqMaybeUsedList :: [MaybeUsed] -> () +seqMaybeUsedList [] = () +seqMaybeUsedList (d:ds) = seqMaybeUsed d `seq` seqMaybeUsedList ds + +seqMaybeUsed :: MaybeUsed -> () +seqMaybeUsed (Use c u) = c `seq` seqUseDmd u +seqMaybeUsed _ = () + +-- Splitting polymorphic Maybe-Used demands +splitUseProdDmd :: Int -> UseDmd -> [MaybeUsed] +splitUseProdDmd n Used = replicate n useTop +splitUseProdDmd n UHead = replicate n Abs +splitUseProdDmd n (UProd ds) = ASSERT( ds `lengthIs` n ) ds +splitUseProdDmd _ d@(UCall _ _) = pprPanic "attempt to prod-split usage call demand" (ppr d) \end{code} %************************************************************************ @@ -306,7 +441,7 @@ splitAbsProdDmd n (UCall d) = ASSERT( n == 1 ) [d] \begin{code} -data JointDmd = JD { strd :: StrDmd, absd :: AbsDmd } +data JointDmd = JD { strd :: MaybeStr, absd :: MaybeUsed } deriving ( Eq, Show ) -- Pretty-printing @@ -314,91 +449,95 @@ instance Outputable JointDmd where ppr (JD {strd = s, absd = a}) = angleBrackets (ppr s <> char ',' <> ppr a) -- Well-formedness preserving constructors for the joint domain -mkJointDmd :: StrDmd -> AbsDmd -> JointDmd +mkJointDmd :: MaybeStr -> MaybeUsed -> JointDmd mkJointDmd s a = JD { strd = s, absd = a } --- = case (s, a) of --- (HyperStr, UProd _) -> JD {strd = HyperStr, absd = Used} --- _ -> JD {strd = s, absd = a} -mkJointDmds :: [StrDmd] -> [AbsDmd] -> [JointDmd] +mkJointDmds :: [MaybeStr] -> [MaybeUsed] -> [JointDmd] mkJointDmds ss as = zipWithEqual "mkJointDmds" mkJointDmd ss as - -mkProdDmd :: [JointDmd] -> JointDmd -mkProdDmd dx - = mkJointDmd sp up - where - sp = strProd $ map strd dx - up = absProd $ map absd dx absDmd :: JointDmd -absDmd = mkJointDmd strTop absBot +absDmd = mkJointDmd Lazy Abs topDmd :: JointDmd -topDmd = mkJointDmd strTop absTop +topDmd = mkJointDmd Lazy useTop + +seqDmd :: JointDmd +seqDmd = mkJointDmd (Str HeadStr) (Use One UHead) botDmd :: JointDmd -botDmd = mkJointDmd strBot absBot +botDmd = mkJointDmd strBot useBot lubDmd :: JointDmd -> JointDmd -> JointDmd lubDmd (JD {strd = s1, absd = a1}) - (JD {strd = s2, absd = a2}) = mkJointDmd (lubStr s1 s2) (lubAbs a1 a2) + (JD {strd = 
s2, absd = a2}) = mkJointDmd (s1 `lubMaybeStr` s2) (a1 `lubMaybeUsed` a2) bothDmd :: JointDmd -> JointDmd -> JointDmd bothDmd (JD {strd = s1, absd = a1}) - (JD {strd = s2, absd = a2}) = mkJointDmd (bothStr s1 s2) (bothAbs a1 a2) + (JD {strd = s2, absd = a2}) = mkJointDmd (s1 `bothMaybeStr` s2) (a1 `bothMaybeUsed` a2) isTopDmd :: JointDmd -> Bool -isTopDmd (JD {strd = Lazy, absd = Used}) = True -isTopDmd _ = False +isTopDmd (JD {strd = Lazy, absd = Use Many Used}) = True +isTopDmd _ = False isBotDmd :: JointDmd -> Bool -isBotDmd (JD {strd = HyperStr, absd = Abs}) = True -isBotDmd _ = False +isBotDmd (JD {strd = Str HyperStr, absd = Abs}) = True +isBotDmd _ = False isAbsDmd :: JointDmd -> Bool isAbsDmd (JD {absd = Abs}) = True -- The strictness part can be HyperStr isAbsDmd _ = False -- for a bottom demand isSeqDmd :: JointDmd -> Bool -isSeqDmd (JD {strd=Str, absd=UHead}) = True -isSeqDmd _ = False +isSeqDmd (JD {strd=Str HeadStr, absd=Use _ UHead}) = True +isSeqDmd _ = False -- More utility functions for strictness seqDemand :: JointDmd -> () -seqDemand (JD {strd = x, absd = y}) = x `seq` y `seq` () +seqDemand (JD {strd = x, absd = y}) = seqMaybeStr x `seq` seqMaybeUsed y `seq` () seqDemandList :: [JointDmd] -> () seqDemandList [] = () seqDemandList (d:ds) = seqDemand d `seq` seqDemandList ds +deferDmd :: JointDmd -> JointDmd +deferDmd (JD {absd = a}) = mkJointDmd Lazy a + isStrictDmd :: Demand -> Bool -- See Note [Strict demands] isStrictDmd (JD {absd = Abs}) = False isStrictDmd (JD {strd = Lazy}) = False isStrictDmd _ = True +isWeakDmd :: Demand -> Bool +isWeakDmd (JD {strd = s, absd = a}) = isLazy s && isUsedMU a -isUsedDmd :: Demand -> Bool -isUsedDmd (JD {absd = x}) = isUsed x - -isUsed :: AbsDmd -> Bool -isUsed x = x /= absBot +useDmd :: JointDmd -> JointDmd +useDmd (JD {strd=d, absd=a}) = mkJointDmd d (markAsUsedDmd a) -someCompUsed :: AbsDmd -> Bool -someCompUsed Used = True -someCompUsed (UProd _) = True -someCompUsed _ = False +cleanUseDmd_maybe :: JointDmd -> Maybe UseDmd +cleanUseDmd_maybe (JD { absd = Use _ ud }) = Just ud +cleanUseDmd_maybe _ = Nothing -evalDmd :: JointDmd --- Evaluated strictly, and used arbitrarily deeply -evalDmd = mkJointDmd strStr absTop +splitFVs :: Bool -- Thunk + -> DmdEnv -> (DmdEnv, DmdEnv) +splitFVs is_thunk rhs_fvs + | is_thunk = foldUFM_Directly add (emptyVarEnv, emptyVarEnv) rhs_fvs + | otherwise = partitionVarEnv isWeakDmd rhs_fvs + where + add uniq dmd@(JD { strd = s, absd = u }) (lazy_fv, sig_fv) + | Lazy <- s = (addToUFM_Directly lazy_fv uniq dmd, sig_fv) + | otherwise = ( addToUFM_Directly lazy_fv uniq (JD { strd = Lazy, absd = u }) + , addToUFM_Directly sig_fv uniq (JD { strd = s, absd = Abs }) ) +\end{code} -defer :: Demand -> Demand -defer (JD {absd = a}) = mkJointDmd strTop a +%************************************************************************ +%* * +\subsection{Clean demand for Strictness and Usage} +%* * +%************************************************************************ --- use :: Demand -> Demand --- use (JD {strd = d}) = mkJointDmd d top -\end{code} +This domain differst from JointDemand in the sence that pure absence +is taken away, i.e., we deal *only* with non-absent demands. 
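For orientation, here are a few of the canonical demands defined above, written out with the new constructors and annotated with how the Outputable instances in this patch render them; the CleanDemand combinators used at the end are defined further down in this file:

    topDmd = JD { strd = Lazy,          absd = Use Many Used }   -- prints as <L,U>
    absDmd = JD { strd = Lazy,          absd = Abs }             -- prints as <L,A>
    botDmd = JD { strd = Str HyperStr,  absd = Abs }             -- prints as <B,A>
    seqDmd = JD { strd = Str HeadStr,   absd = Use One UHead }   -- prints as <S,1*H>

    -- A CleanDemand is re-wrapped into a JointDmd by choosing a cardinality,
    -- and stripping Lazy/Abs goes the other way:
    mkOnceUsedDmd (CD { sd = HeadStr, ud = Used })  ==  JD (Str HeadStr) (Use One Used)  -- <S,1*U>
    strictenDmd topDmd                              ==  CD { sd = HeadStr, ud = Used }   -- i.e. cleanEvalDmd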
Note [Strict demands] ~~~~~~~~~~~~~~~~~~~~~ @@ -430,53 +569,97 @@ f g = (snd (g 3), True) should be: m + \begin{code} -mkCallDmd :: JointDmd -> JointDmd -mkCallDmd (JD {strd = d, absd = a}) = mkJointDmd (strCall d) (absCall a) -peelCallDmd :: JointDmd -> Maybe JointDmd +data CleanDemand = CD { sd :: StrDmd, ud :: UseDmd } + deriving ( Eq, Show ) + +instance Outputable CleanDemand where + ppr (CD {sd = s, ud = a}) = angleBrackets (ppr s <> comma <> ppr a) + +mkCleanDmd :: StrDmd -> UseDmd -> CleanDemand +mkCleanDmd s a = CD { sd = s, ud = a } + +bothCleanDmd :: CleanDemand -> CleanDemand -> CleanDemand +bothCleanDmd (CD { sd = s1, ud = a1}) (CD { sd = s2, ud = a2}) + = CD { sd = s1 `bothStr` s2, ud = a1 `bothUse` a2 } + +mkHeadStrict :: CleanDemand -> CleanDemand +mkHeadStrict (CD { ud = a }) = mkCleanDmd HeadStr a + +oneifyDmd :: JointDmd -> JointDmd +oneifyDmd (JD { strd = s, absd = Use _ a }) = JD { strd = s, absd = Use One a } +oneifyDmd jd = jd + +mkOnceUsedDmd, mkManyUsedDmd :: CleanDemand -> JointDmd +mkOnceUsedDmd (CD {sd = s,ud = a}) = mkJointDmd (Str s) (Use One a) +mkManyUsedDmd (CD {sd = s,ud = a}) = mkJointDmd (Str s) (Use Many a) + +getUsage :: CleanDemand -> UseDmd +getUsage = ud + +evalDmd :: JointDmd +-- Evaluated strictly, and used arbitrarily deeply +evalDmd = mkJointDmd (Str HeadStr) useTop + +mkProdDmd :: [JointDmd] -> CleanDemand +mkProdDmd dx + = mkCleanDmd sp up + where + sp = mkSProd $ map strd dx + up = mkUProd $ map absd dx + +mkCallDmd :: CleanDemand -> CleanDemand +mkCallDmd (CD {sd = d, ud = u}) + = mkCleanDmd (mkSCall d) (mkUCall One u) + +-- Returns result demand * strictness flag * one-shotness of the call +peelCallDmd :: CleanDemand + -> ( CleanDemand + , Bool -- True <=> had to strengthen from HeadStr + -- hence defer results + , Count) -- Call count + -- Exploiting the fact that -- on the strictness side C(B) = B -- and on the usage side C(U) = U -peelCallDmd (JD {strd = s, absd = u}) - | Just s' <- peel_s s - , Just u' <- peel_u u - = Just $ mkJointDmd s' u' - | otherwise - = Nothing +peelCallDmd (CD {sd = s, ud = u}) + = let (s', b) = peel_s s + (u', c) = peel_u u + in (mkCleanDmd s' u', b, c) + where + peel_s (SCall s) = (s, False) + peel_s HyperStr = (HyperStr, False) + peel_s _ = (HeadStr, True) + + peel_u (UCall c u) = (u, c) + peel_u _ = (Used, Many) + -- The last case includes UHead which seems a bit wrong + -- because the body isn't used at all! + +cleanEvalDmd :: CleanDemand +cleanEvalDmd = mkCleanDmd HeadStr Used + +cleanEvalProdDmd :: Arity -> CleanDemand +cleanEvalProdDmd n = mkCleanDmd HeadStr (UProd (replicate n useTop)) + +isSingleUsed :: JointDmd -> Bool +isSingleUsed (JD {absd=a}) = is_used_once a where - peel_s (SCall s) = Just s - peel_s HyperStr = Just HyperStr - peel_s _ = Nothing - - peel_u (UCall u) = Just u - peel_u Used = Just Used - peel_u Abs = Just Abs - peel_u UHead = Just Abs - peel_u _ = Nothing - -splitCallDmd :: JointDmd -> (Int, JointDmd) -splitCallDmd (JD {strd = SCall d, absd = UCall a}) - = case splitCallDmd (mkJointDmd d a) of - (n, r) -> (n + 1, r) --- Exploiting the fact that C(U) === U -splitCallDmd (JD {strd = SCall d, absd = Used}) - = case splitCallDmd (mkJointDmd d Used) of - (n, r) -> (n + 1, r) -splitCallDmd d = (0, d) - -vanillaCall :: Arity -> Demand -vanillaCall 0 = evalDmd -vanillaCall n = - -- generate S^n (S) - let strComp = (iterate strCall strStr) !! n - absComp = (iterate absCall absTop) !! 
n - in mkJointDmd strComp absComp + is_used_once Abs = True + is_used_once (Use One _) = True + is_used_once _ = False \end{code} +Note [Threshold demands] +~~~~~~~~~~~~~~~~~~~~~~~~ +Threshold usage demand is generated to figure out if +cardinality-instrumented demands of a binding's free variables should +be unleashed. See also [Aggregated demand for cardinality]. + Note [Replicating polymorphic demands] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Some demands can be considered as polymorphic. Generally, it is applicable to such beasts as tops, bottoms as well as Head-Used adn Head-stricts demands. For instance, @@ -488,32 +671,26 @@ can be expanded to saturate a callee's arity. \begin{code} -splitProdDmd :: Int -> Demand -> [Demand] --- Split a product demands into its components, --- regardless of whether it has juice in it --- The demand is not ncessarily strict -splitProdDmd n (JD {strd=x, absd=y}) - = mkJointDmds (splitStrProdDmd n x) (splitAbsProdDmd n y) - -splitProdDmd_maybe :: Demand -> Maybe [Demand] +splitProdDmd :: Arity -> JointDmd -> [JointDmd] +splitProdDmd n (JD {strd = s, absd = u}) + = mkJointDmds (split_str s) (split_abs u) + where + split_str Lazy = replicate n Lazy + split_str (Str s) = splitStrProdDmd n s + + split_abs Abs = replicate n Abs + split_abs (Use _ u) = splitUseProdDmd n u + +splitProdDmd_maybe :: JointDmd -> Maybe [JointDmd] -- Split a product into its components, iff there is any -- useful information to be extracted thereby -- The demand is not necessarily strict! -splitProdDmd_maybe JD {strd=SProd sx, absd=UProd ux} - = ASSERT( sx `lengthIs` length ux ) - Just (mkJointDmds sx ux) -splitProdDmd_maybe JD {strd=SProd sx, absd=u} - = Just (mkJointDmds sx (splitAbsProdDmd (length sx) u)) -splitProdDmd_maybe (JD {strd=s, absd=UProd ux}) - = Just (mkJointDmds (splitStrProdDmd (length ux) s) ux) -splitProdDmd_maybe _ = Nothing - --- Check whether is a product demand with *some* useful info inside --- The demand is not ncessarily strict -isProdDmd :: Demand -> Bool -isProdDmd (JD {strd = SProd _}) = True -isProdDmd (JD {absd = UProd _}) = True -isProdDmd _ = False +splitProdDmd_maybe (JD {strd = s, absd = u}) + = case (s,u) of + (Str (SProd sx), Use _ u) -> Just (mkJointDmds sx (splitUseProdDmd (length sx) u)) + (Str s, Use _ (UProd ux)) -> Just (mkJointDmds (splitStrProdDmd (length ux) s) ux) + (Lazy, Use _ (UProd ux)) -> Just (mkJointDmds (replicate (length ux) Lazy) ux) + _ -> Nothing \end{code} %************************************************************************ @@ -545,7 +722,8 @@ lubCPR _ _ = NoCPR bothCPR :: CPRResult -> CPRResult -> CPRResult -- See Note [Asymmetry of 'both' for DmdType and DmdResult] -bothCPR r _ = r +bothCPR _ BotCPR = BotCPR -- If either diverges, we diverge +bothCPR r _ = r instance Outputable DmdResult where ppr RetProd = char 'm' @@ -580,7 +758,6 @@ cprProdRes :: DmdResult cprProdRes | opt_CprOff = topRes | otherwise = RetProd - isTopRes :: DmdResult -> Bool isTopRes NoCPR = True isTopRes _ = False @@ -601,7 +778,7 @@ returnsCPR_maybe (RetSum t) = Just t returnsCPR_maybe (RetProd) = Just fIRST_TAG returnsCPR_maybe _ = Nothing -resTypeArgDmd :: DmdResult -> Demand +resTypeArgDmd :: DmdResult -> JointDmd -- TopRes and BotRes are polymorphic, so that -- BotRes === Bot -> BotRes === ... -- TopRes === Top -> TopRes === ... 
@@ -617,35 +794,41 @@ resTypeArgDmd _ = topDmd %************************************************************************ \begin{code} -worthSplittingFun :: [Demand] -> DmdResult -> Bool +worthSplittingFun :: [JointDmd] -> DmdResult -> Bool -- True <=> the wrapper would not be an identity function worthSplittingFun ds res = any worth_it ds || returnsCPR res -- worthSplitting returns False for an empty list of demands, -- and hence do_strict_ww is False if arity is zero and there is no CPR where - worth_it (JD {absd=Abs}) = True -- Absent arg + worth_it (JD {absd=Abs}) = True -- Absent arg -- See Note [Worker-wrapper for bottoming functions] - worth_it (JD {strd=HyperStr, absd=UProd _}) = True + worth_it (JD {strd=Str HyperStr, absd=Use _ (UProd _)}) = True -- See Note [Worthy functions for Worker-Wrapper split] - worth_it (JD {strd=SProd _}) = True -- Product arg to evaluate - worth_it (JD {strd=Str, absd=UProd _}) = True -- Strictly used product arg - worth_it (JD {strd=Str, absd=UHead}) = True - worth_it _ = False + worth_it (JD {strd=Str (SProd {})}) = True -- Product arg to evaluate + worth_it (JD {strd=Str HeadStr, absd=Use _ (UProd _)}) = True -- Strictly used product arg + worth_it (JD {strd=Str HeadStr, absd=Use _ UHead}) = True + worth_it _ = False -worthSplittingThunk :: Demand -- Demand on the thunk +worthSplittingThunk :: JointDmd -- Demand on the thunk -> DmdResult -- CPR info for the thunk -> Bool worthSplittingThunk dmd res = worth_it dmd || returnsCPR res where -- Split if the thing is unpacked - worth_it (JD {strd=SProd _, absd=a}) = someCompUsed a - worth_it (JD {strd=Str, absd=UProd _}) = True + worth_it (JD {strd=Str (SProd {}), absd=Use _ a}) = some_comp_used a + worth_it (JD {strd=Str HeadStr, absd=Use _ UProd {}}) = True -- second component points out that at least some of - worth_it _ = False + worth_it _ = False + + some_comp_used Used = True + some_comp_used (UProd _ ) = True + some_comp_used _ = False + + \end{code} Note [Worthy functions for Worker-Wrapper split] @@ -823,6 +1006,8 @@ bothDmdType (DmdType fv1 ds1 r1) (DmdType fv2 _ r2) both_fv1 = modifyEnv (isBotRes r1) (`bothDmd` botDmd) fv2 fv1 both_fv both_fv2 = modifyEnv (isBotRes r2) (`bothDmd` botDmd) fv1 fv2 both_fv1 +bothDmdEnv :: DmdEnv -> DmdEnv -> DmdEnv +bothDmdEnv = plusVarEnv_C bothDmd instance Outputable DmdType where ppr (DmdType fv ds res) @@ -846,8 +1031,8 @@ cprProdDmdType = DmdType emptyDmdEnv [] cprProdRes isTopDmdType :: DmdType -> Bool isTopDmdType (DmdType env [] res) - | isTopRes res && isEmptyVarEnv env = True -isTopDmdType _ = False + | isTopRes res && isEmptyVarEnv env = True +isTopDmdType _ = False mkDmdType :: DmdEnv -> [Demand] -> DmdResult -> DmdType mkDmdType fv ds res = DmdType fv ds res @@ -869,11 +1054,29 @@ splitDmdTy :: DmdType -> (Demand, DmdType) splitDmdTy (DmdType fv (dmd:dmds) res_ty) = (dmd, DmdType fv dmds res_ty) splitDmdTy ty@(DmdType _ [] res_ty) = (resTypeArgDmd res_ty, ty) +deferAndUse :: Bool -- Lazify (defer) the type + -> Count -- Many => manify the type + -> DmdType -> DmdType +deferAndUse True Many ty = deferType (useType ty) +deferAndUse False Many ty = useType ty +deferAndUse True One ty = deferType ty +deferAndUse False One ty = ty + deferType :: DmdType -> DmdType +-- deferType ty1 == ty1 `lubType` DT { v -> } [] top } +-- Ie it might be used, or not deferType (DmdType fv _ _) = DmdType (deferEnv fv) [] topRes deferEnv :: DmdEnv -> DmdEnv -deferEnv fv = mapVarEnv defer fv +deferEnv fv = mapVarEnv deferDmd fv + +useType :: DmdType -> DmdType +-- useType ty1 
== ty1 `bothType` ty1 +-- NB that bothType is assymetrical, so no-op on argument demands +useType (DmdType fv ds res_ty) = DmdType (useEnv fv) ds res_ty + +useEnv :: DmdEnv -> DmdEnv +useEnv fv = mapVarEnv useDmd fv modifyEnv :: Bool -- No-op if False -> (Demand -> Demand) -- The zapper @@ -889,8 +1092,73 @@ modifyEnv need_to_modify zapper env1 env2 env where current_val = expectJust "modifyEnv" (lookupUFM_Directly env uniq) +strictenDmd :: JointDmd -> CleanDemand +strictenDmd (JD {strd = s, absd = u}) + = CD { sd = poke_s s, ud = poke_u u } + where + poke_s Lazy = HeadStr + poke_s (Str s) = s + poke_u Abs = UHead + poke_u (Use _ u) = u + +toCleanDmd :: (CleanDemand -> e -> (DmdType, e)) + -> Demand + -> e -> (DmdType, e) +-- See Note [Analyzing with lazy demand and lambdas] +toCleanDmd anal (JD { strd = s, absd = u }) e + = case (s,u) of + (_, Abs) -> mf (const topDmdType) (anal (CD { sd = HeadStr, ud = Used }) e) + -- See Note [Always analyse in virgin pass] + + (Str s', Use c u') -> mf (deferAndUse False c) (anal (CD { sd = s', ud = u' }) e) + (Lazy, Use c u') -> mf (deferAndUse True c) (anal (CD { sd = HeadStr, ud = u' }) e) + where + mf f (a,b) = (f a, b) \end{code} +Note [Always analyse in virgin pass] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Tricky point: make sure that we analyse in the 'virgin' pass. Consider + rec { f acc x True = f (...rec { g y = ...g... }...) + f acc x False = acc } +In the virgin pass for 'f' we'll give 'f' a very strict (bottom) type. +That might mean that we analyse the sub-expression containing the +E = "...rec g..." stuff in a bottom demand. Suppose we *didn't analyse* +E, but just retuned botType. + +Then in the *next* (non-virgin) iteration for 'f', we might analyse E +in a weaker demand, and that will trigger doing a fixpoint iteration +for g. But *because it's not the virgin pass* we won't start g's +iteration at bottom. Disaster. (This happened in $sfibToList' of +nofib/spectral/fibheaps.) + +So in the virgin pass we make sure that we do analyse the expression +at least once, to initialise its signatures. + +Note [Analyzing with lazy demand and lambdas] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +The insight for analyzing lambdas follows from the fact that for +strictness S = C(L). This polymorphic expansion is critical for +cardinality analysis of the following example: + +{-# NOINLINE build #-} +build g = (g (:) [], g (:) []) + +h c z = build (\x -> + let z1 = z ++ z + in if c + then \y -> x (y ++ z1) + else \y -> x (z1 ++ y)) + +One can see that `build` assigns to `g` demand . +Therefore, when analyzing the lambda `(\x -> ...)`, we +expect each lambda \y -> ... to be annotated as "one-shot" +one. Therefore (\x -> \y -> x (y ++ z)) should be analyzed with a +demand . + +This is achieved by, first, converting the lazy demand L into the +strict S by the second clause of the analysis. + %************************************************************************ %* * Demand signatures @@ -905,8 +1173,9 @@ a demand on the Id into a DmdType, which gives c) an indication of the result of applying the Id to its arguments -However, in fact we store in the Id an extremely emascuated demand transfomer, -namely +However, in fact we store in the Id an extremely emascuated demand +transfomer, namely + a single DmdType (Nevertheless we dignify StrictSig as a distinct type.) 
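As a minimal standalone sketch of that idea (the Toy* names below are invented purely for illustration and are not types in this file): the stored DmdType describes a sufficiently saturated call, and any less saturated call falls back to a conservative answer, much as dmdTransformSig below falls back to deferType/useType.

    data ToyDmdType = ToyDmdType { toyArgDmds :: [String], toyRes :: String }
    newtype ToySig  = ToySig ToyDmdType      -- "signature" = one precomputed DmdType

    toyTransform :: ToySig -> Int -> ToyDmdType
    toyTransform (ToySig ty) n_call_args
      | n_call_args >= length (toyArgDmds ty) = ty                 -- saturated: unleash the signature
      | otherwise                             = ToyDmdType [] "L"  -- unsaturated: defer to a lazy, top-like type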
@@ -959,42 +1228,85 @@ botSig = StrictSig botDmdType cprProdSig :: StrictSig cprProdSig = StrictSig cprProdDmdType -dmdTransformSig :: StrictSig -> Demand -> DmdType +argsOneShots :: StrictSig -> Arity -> [[Bool]] +argsOneShots (StrictSig (DmdType _ arg_ds _)) n_val_args + | arg_ds `lengthExceeds` n_val_args + = [] -- Too few arguments + | otherwise + = go arg_ds + where + go [] = [] + go (arg_d : arg_ds) = argOneShots arg_d `cons` go arg_ds + + cons [] [] = [] + cons a as = a:as + +argOneShots :: JointDmd -> [Bool] +argOneShots (JD { absd = usg }) + = case usg of + Use _ arg_usg -> go arg_usg + _ -> [] + where + go (UCall One u) = True : go u + go (UCall Many u) = False : go u + go _ = [] + +dmdTransformSig :: StrictSig -> CleanDemand -> DmdType -- (dmdTransformSig fun_sig dmd) considers a call to a function whose -- signature is fun_sig, with demand dmd. We return the demand -- that the function places on its context (eg its args) -dmdTransformSig (StrictSig dmd_ty@(DmdType _ arg_ds _)) dmd - = go arg_ds dmd +dmdTransformSig (StrictSig dmd_ty@(DmdType _ arg_ds _)) + (CD { sd = str, ud = abs }) + = dmd_ty2 where - go [] dmd - | isBotDmd dmd = botDmdType -- Transform bottom demand to bottom type - | otherwise = dmd_ty -- Saturated - go (_:as) dmd = case peelCallDmd dmd of - Just dmd' -> go as dmd' - Nothing -> deferType dmd_ty - -- NB: it's important to use deferType, and not just return topDmdType - -- Consider let { f x y = p + x } in f 1 - -- The application isn't saturated, but we must nevertheless propagate - -- a lazy demand for p! - -dmdTransformDataConSig :: Arity -> StrictSig -> Demand -> DmdType + dmd_ty1 | str_sat = dmd_ty + | otherwise = deferType dmd_ty + dmd_ty2 | abs_sat = dmd_ty1 + | otherwise = useType dmd_ty1 + + str_sat = go_str arg_ds str + abs_sat = go_abs arg_ds abs + + go_str [] _ = True + go_str (_:_) HyperStr = True -- HyperStr = Call(HyperStr) + go_str (_:as) (SCall d') = go_str as d' + go_str _ _ = False + + go_abs [] _ = True + go_abs (_:as) (UCall One d') = go_abs as d' + go_abs _ _ = False + + -- NB: it's important to use deferType, and not just return topDmdType + -- Consider let { f x y = p + x } in f 1 + -- The application isn't saturated, but we must nevertheless propagate + -- a lazy demand for p! + +dmdTransformDataConSig :: Arity -> StrictSig -> CleanDemand -> DmdType -- Same as dmdTranformSig but for a data constructor (worker), -- which has a special kind of demand transformer. -- If the constructor is saturated, we feed the demand on -- the result into the constructor arguments. 
-dmdTransformDataConSig arity (StrictSig (DmdType _ _ con_res)) dmd - = go arity dmd - where - go 0 dmd = DmdType emptyDmdEnv (splitProdDmd arity dmd) con_res +dmdTransformDataConSig arity (StrictSig (DmdType _ _ con_res)) + (CD { sd = str, ud = abs }) + | Just str_dmds <- go_str arity str + , Just abs_dmds <- go_abs arity abs + = DmdType emptyDmdEnv (mkJointDmds str_dmds abs_dmds) con_res -- Must remember whether it's a product, hence con_res, not TopRes - go n dmd = case peelCallDmd dmd of - Nothing -> topDmdType - Just dmd' -> go (n-1) dmd' + + | otherwise -- Not saturated + = topDmdType + where + go_str 0 dmd = Just (splitStrProdDmd arity dmd) + go_str n (SCall s') = go_str (n-1) s' + go_str _ _ = Nothing + + go_abs 0 dmd = Just (splitUseProdDmd arity dmd) + go_abs n (UCall One u') = go_abs (n-1) u' + go_abs _ _ = Nothing \end{code} Note [Non-full application] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ - If a function having bottom as its demand result is applied to a less number of arguments than its syntactic arity, we cannot say for sure that it is going to diverge. This is the reason why we use the @@ -1003,7 +1315,6 @@ of arguments, says conservatively if the function is going to diverge or not. \begin{code} - -- appIsBottom returns true if an application to n args would diverge appIsBottom :: StrictSig -> Int -> Bool appIsBottom (StrictSig (DmdType _ ds res)) n @@ -1019,6 +1330,49 @@ pprIfaceStrictSig (StrictSig (DmdType _ dmds res)) = hcat (map ppr dmds) <> ppr res \end{code} +Zap absence or one-shot information, under control of flags + +\begin{code} +zapDemand :: DynFlags -> Demand -> Demand +zapDemand dflags dmd + | Just kfs <- killFlags dflags = zap_dmd kfs dmd + | otherwise = dmd + +zapStrictSig :: DynFlags -> StrictSig -> StrictSig +zapStrictSig dflags sig@(StrictSig (DmdType env ds r)) + | Just kfs <- killFlags dflags = StrictSig (DmdType env (map (zap_dmd kfs) ds) r) + | otherwise = sig + +type KillFlags = (Bool, Bool) + +killFlags :: DynFlags -> Maybe KillFlags +killFlags dflags + | not kill_abs && not kill_one_shot = Nothing + | otherwise = Just (kill_abs, kill_one_shot) + where + kill_abs = gopt Opt_KillAbsence dflags + kill_one_shot = gopt Opt_KillOneShot dflags + +zap_dmd :: KillFlags -> Demand -> Demand +zap_dmd kfs (JD {strd = s, absd = u}) = JD {strd = s, absd = zap_musg kfs u} + +zap_musg :: KillFlags -> MaybeUsed -> MaybeUsed +zap_musg (kill_abs, _) Abs + | kill_abs = useTop + | otherwise = Abs +zap_musg kfs (Use c u) = Use (zap_count kfs c) (zap_usg kfs u) + +zap_count :: KillFlags -> Count -> Count +zap_count (_, kill_one_shot) c + | kill_one_shot = Many + | otherwise = c + +zap_usg :: KillFlags -> UseDmd -> UseDmd +zap_usg kfs (UCall c u) = UCall (zap_count kfs c) (zap_usg kfs u) +zap_usg kfs (UProd us) = UProd (map (zap_musg kfs) us) +zap_usg _ u = u +\end{code} + %************************************************************************ %* * @@ -1030,47 +1384,83 @@ pprIfaceStrictSig (StrictSig (DmdType _ dmds res)) \begin{code} instance Binary StrDmd where put_ bh HyperStr = do putByte bh 0 - put_ bh Lazy = do putByte bh 1 - put_ bh Str = do putByte bh 2 - put_ bh (SCall s) = do putByte bh 3 + put_ bh HeadStr = do putByte bh 1 + put_ bh (SCall s) = do putByte bh 2 put_ bh s - put_ bh (SProd sx) = do putByte bh 4 + put_ bh (SProd sx) = do putByte bh 3 put_ bh sx get bh = do h <- getByte bh case h of - 0 -> do return strBot - 1 -> do return strTop - 2 -> do return strStr - 3 -> do s <- get bh - return $ strCall s + 0 -> do return HyperStr + 1 -> do return HeadStr + 2 -> do 
s <- get bh + return (SCall s) _ -> do sx <- get bh - return $ strProd sx + return (SProd sx) -instance Binary AbsDmd where - put_ bh Abs = do +instance Binary MaybeStr where + put_ bh Lazy = do + putByte bh 0 + put_ bh (Str s) = do + putByte bh 1 + put_ bh s + + get bh = do + h <- getByte bh + case h of + 0 -> return Lazy + _ -> do s <- get bh + return $ Str s + +instance Binary Count where + put_ bh One = do putByte bh 0 + put_ bh Many = do putByte bh 1 + + get bh = do h <- getByte bh + case h of + 0 -> return One + _ -> return Many + +instance Binary MaybeUsed where + put_ bh Abs = do putByte bh 0 - put_ bh Used = do + put_ bh (Use c u) = do putByte bh 1 - put_ bh UHead = do + put_ bh c + put_ bh u + + get bh = do + h <- getByte bh + case h of + 0 -> return Abs + _ -> do c <- get bh + u <- get bh + return $ Use c u + +instance Binary UseDmd where + put_ bh Used = do + putByte bh 0 + put_ bh UHead = do + putByte bh 1 + put_ bh (UCall c u) = do putByte bh 2 - put_ bh (UCall u) = do - putByte bh 3 + put_ bh c put_ bh u - put_ bh (UProd ux) = do - putByte bh 4 + put_ bh (UProd ux) = do + putByte bh 3 put_ bh ux get bh = do h <- getByte bh case h of - 0 -> return absBot - 1 -> return absTop - 2 -> return absHead - 3 -> do u <- get bh - return $ absCall u + 0 -> return $ Used + 1 -> return $ UHead + 2 -> do c <- get bh + u <- get bh + return (UCall c u) _ -> do ux <- get bh - return $ absProd ux + return (UProd ux) instance Binary JointDmd where put_ bh (JD {strd = x, absd = y}) = do put_ bh x; put_ bh y @@ -1110,3 +1500,4 @@ instance Binary CPRResult where 2 -> return NoCPR _ -> return BotCPR \end{code} + diff --git a/compiler/basicTypes/MkId.lhs b/compiler/basicTypes/MkId.lhs index 112664c1e216..218033a4cfcf 100644 --- a/compiler/basicTypes/MkId.lhs +++ b/compiler/basicTypes/MkId.lhs @@ -35,7 +35,7 @@ module MkId ( wiredInIds, ghcPrimIds, unsafeCoerceName, unsafeCoerceId, realWorldPrimId, voidArgId, nullAddrId, seqId, lazyId, lazyIdKey, - coercionTokenId, + coercionTokenId, magicSingIId, -- Re-export error Ids module PrelRules @@ -136,7 +136,8 @@ ghcPrimIds realWorldPrimId, unsafeCoerceId, nullAddrId, - seqId + seqId, + magicSingIId ] \end{code} @@ -320,10 +321,10 @@ mkDictSelId dflags no_unf name clas strict_sig = mkStrictSig (mkTopDmdType [arg_dmd] topRes) arg_dmd | new_tycon = evalDmd - | otherwise = mkProdDmd [ if the_arg_id == id then evalDmd else absDmd + | otherwise = mkManyUsedDmd $ + mkProdDmd [ if the_arg_id == id then evalDmd else absDmd | id <- arg_ids ] - tycon = classTyCon clas new_tycon = isNewTyCon tycon [data_con] = tyConDataCons tycon @@ -346,14 +347,13 @@ mkDictSelId dflags no_unf name clas -- varToCoreExpr needed for equality superclass selectors -- sel a b d = case x of { MkC _ (g:a~b) _ -> CO g } -dictSelRule :: Int -> Arity - -> DynFlags -> Id -> IdUnfoldingFun -> [CoreExpr] -> Maybe CoreExpr +dictSelRule :: Int -> Arity -> RuleFun -- Tries to persuade the argument to look like a constructor -- application, using exprIsConApp_maybe, and then selects -- from it -- sel_i t1..tk (D t1..tk op1 ... opm) = opi -- -dictSelRule val_index n_ty_args _ _ id_unf args +dictSelRule val_index n_ty_args _ id_unf _ args | (dict_arg : _) <- drop n_ty_args args , Just (_, _, con_args) <- exprIsConApp_maybe id_unf dict_arg = Just (getNth con_args val_index) @@ -1023,13 +1023,14 @@ they can unify with both unlifted and lifted types. Hence we provide another gun with which to shoot yourself in the foot. 
\begin{code} -lazyIdName, unsafeCoerceName, nullAddrName, seqName, realWorldName, coercionTokenName :: Name +lazyIdName, unsafeCoerceName, nullAddrName, seqName, realWorldName, coercionTokenName, magicSingIName :: Name unsafeCoerceName = mkWiredInIdName gHC_PRIM (fsLit "unsafeCoerce#") unsafeCoerceIdKey unsafeCoerceId nullAddrName = mkWiredInIdName gHC_PRIM (fsLit "nullAddr#") nullAddrIdKey nullAddrId seqName = mkWiredInIdName gHC_PRIM (fsLit "seq") seqIdKey seqId realWorldName = mkWiredInIdName gHC_PRIM (fsLit "realWorld#") realWorldPrimIdKey realWorldPrimId lazyIdName = mkWiredInIdName gHC_MAGIC (fsLit "lazy") lazyIdKey lazyId coercionTokenName = mkWiredInIdName gHC_PRIM (fsLit "coercionToken#") coercionTokenIdKey coercionTokenId +magicSingIName = mkWiredInIdName gHC_PRIM (fsLit "magicSingI") magicSingIKey magicSingIId \end{code} \begin{code} @@ -1082,8 +1083,7 @@ seqId = pcMiscPrelId seqName ty info , ru_try = match_seq_of_cast } -match_seq_of_cast :: DynFlags -> Id -> IdUnfoldingFun -> [CoreExpr] - -> Maybe CoreExpr +match_seq_of_cast :: RuleFun -- See Note [Built-in RULES for seq] match_seq_of_cast _ _ _ [Type _, Type res_ty, Cast scrut co, expr] = Just (Var seqId `mkApps` [Type (pFst (coercionKind co)), Type res_ty, @@ -1096,6 +1096,15 @@ lazyId = pcMiscPrelId lazyIdName ty info where info = noCafIdInfo ty = mkForAllTys [alphaTyVar] (mkFunTy alphaTy alphaTy) + + +-------------------------------------------------------------------------------- +magicSingIId :: Id -- See Note [magicSingIId magic] +magicSingIId = pcMiscPrelId magicSingIName ty info + where + info = noCafIdInfo `setInlinePragInfo` neverInlinePragma + ty = mkForAllTys [alphaTyVar] alphaTy + \end{code} Note [Unsafe coerce magic] @@ -1189,6 +1198,45 @@ See Trac #3259 for a real world example. lazyId is defined in GHC.Base, so we don't *have* to inline it. If it appears un-applied, we'll end up just calling it. + +Note [magicSingIId magic] +~~~~~~~~~~~~~~~~~~~~~~~~~ + +The identifier `magicSIngI` is just a place-holder, which is used to +implement a primitve that we cannot define in Haskell but we can write +in Core. It is declared with a place-holder type: + + magicSingI :: forall a. a + +The intention is that the identifier will be used in a very specific way, +namely we add the following to the library: + + withSingI :: Sing n -> (SingI n => a) -> a + withSingI x = magicSingI x ((\f -> f) :: () -> ()) + +The actual primitive is `withSingI`, and it uses its first argument +(of type `Sing n`) as the evidece/dictionary in the second argument. +This is done by adding a built-in rule to `prelude/PrelRules.hs` +(see `match_magicSingI`), which works as follows: + +magicSingI @ (Sing n -> (() -> ()) -> (SingI n -> a) -> a) + x + (\f -> _) + +----> + +\(f :: (SingI n -> a) -> a) -> f (cast x (newtypeCo n)) + +The `newtypeCo` coercion is extracted from the `SingI` type constructor, +which is available in the instantiation. We are casting `Sing n` into `SingI n`, +which is OK because `SingI` is a class with a single methid, +and thus it is implemented as newtype. + +The `(\f -> f)` parameter is there just so that we can avoid +having to make up a new name for the lambda, it is completely +changed by the rewrite. + + ------------------------------------------------------------- @realWorld#@ used to be a magic literal, \tr{void#}. If things get nasty as-is, change it back to a literal (@Literal@). 
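The representation fact that Note [magicSingIId magic] relies on, namely that a class with a single method is implemented as a newtype of that method, so a plain value can stand in for its dictionary, can be demonstrated in ordinary library code with unsafeCoerce. Below is a standalone sketch with invented names (HasValue, Magic, withValue); the real primitive instead uses the genuine SingI newtype coercion via a built-in rule:

    {-# LANGUAGE RankNTypes, ScopedTypeVariables #-}
    import Unsafe.Coerce (unsafeCoerce)

    class HasValue a where
      value :: a

    -- A (HasValue a => r) is represented as a function taking the HasValue
    -- dictionary, and that dictionary is in turn just the method, i.e. an 'a'.
    newtype Magic a r = Magic (HasValue a => r)

    withValue :: forall a r. a -> (HasValue a => r) -> r
    withValue x k = unsafeCoerce (Magic k :: Magic a r) x
    {-# NOINLINE withValue #-}  -- keep the optimiser from looking through the coercion

    -- e.g. withValue (42 :: Int) value  ==>  42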
diff --git a/compiler/basicTypes/MkId.lhs-boot b/compiler/basicTypes/MkId.lhs-boot index 201f977e3d2b..fe66599df298 100644 --- a/compiler/basicTypes/MkId.lhs-boot +++ b/compiler/basicTypes/MkId.lhs-boot @@ -9,6 +9,8 @@ data DataConBoxer mkDataConWorkId :: Name -> DataCon -> Id mkPrimOpId :: PrimOp -> Id + +magicSingIId :: Id \end{code} diff --git a/compiler/basicTypes/Name.lhs b/compiler/basicTypes/Name.lhs index e11262568e9d..55edc8d505d6 100644 --- a/compiler/basicTypes/Name.lhs +++ b/compiler/basicTypes/Name.lhs @@ -135,15 +135,19 @@ Notes about the NameSorts: 1. Initially, top-level Ids (including locally-defined ones) get External names, and all other local Ids get Internal names -2. Things with a External name are given C static labels, so they finally +2. In any invocation of GHC, an External Name for "M.x" has one and only one + unique. This unique association is ensured via the Name Cache; + see Note [The Name Cache] in IfaceEnv. + +3. Things with a External name are given C static labels, so they finally appear in the .o file's symbol table. They appear in the symbol table in the form M.n. If originally-local things have this property they must be made @External@ first. -3. In the tidy-core phase, a External that is not visible to an importer +4. In the tidy-core phase, a External that is not visible to an importer is changed to Internal, and a Internal that is visible is changed to External -4. A System Name differs in the following ways: +5. A System Name differs in the following ways: a) has unique attached when printing dumps b) unifier eliminates sys tyvars in favour of user provs where possible @@ -272,6 +276,9 @@ mkDerivedInternalName derive_occ uniq (Name { n_occ = occ, n_loc = loc }) -- | Create a name which definitely originates in the given module mkExternalName :: Unique -> Module -> OccName -> SrcSpan -> Name +-- WATCH OUT! External Names should be in the Name Cache +-- (see Note [The Name Cache] in IfaceEnv), so don't just call mkExternalName +-- with some fresh unique without populating the Name Cache mkExternalName uniq mod occ loc = Name { n_uniq = getKeyFastInt uniq, n_sort = External mod, n_occ = occ, n_loc = loc } diff --git a/compiler/basicTypes/OccName.lhs b/compiler/basicTypes/OccName.lhs index df45cdc6580c..2329e5f815bb 100644 --- a/compiler/basicTypes/OccName.lhs +++ b/compiler/basicTypes/OccName.lhs @@ -58,7 +58,8 @@ module OccName ( -- ** Derived 'OccName's isDerivedOccName, - mkDataConWrapperOcc, mkWorkerOcc, mkDefaultMethodOcc, mkGenDefMethodOcc, + mkDataConWrapperOcc, mkWorkerOcc, mkDefaultMethodOcc, + mkGenDefMethodOcc, mkDerivedTyConOcc, mkNewTyCoOcc, mkClassOpAuxOcc, mkCon2TagOcc, mkTag2ConOcc, mkMaxTagOcc, mkClassDataConOcc, mkDictOcc, mkIPOcc, @@ -481,18 +482,12 @@ isValOcc (OccName DataName _) = True isValOcc _ = False isDataOcc (OccName DataName _) = True -isDataOcc (OccName VarName s) - | isLexCon s = pprPanic "isDataOcc: check me" (ppr s) - -- Jan06: I don't think this should happen isDataOcc _ = False -- | Test if the 'OccName' is a data constructor that starts with -- a symbol (e.g. @:@, or @[]@) isDataSymOcc :: OccName -> Bool isDataSymOcc (OccName DataName s) = isLexConSym s -isDataSymOcc (OccName VarName s) - | isLexConSym s = pprPanic "isDataSymOcc: check me" (ppr s) - -- Jan06: I don't think this should happen isDataSymOcc _ = False -- Pretty inefficient! 
@@ -574,8 +569,8 @@ isDerivedOccName occ = \end{code} \begin{code} -mkDataConWrapperOcc, mkWorkerOcc, mkDefaultMethodOcc, mkGenDefMethodOcc, - mkDerivedTyConOcc, mkClassDataConOcc, mkDictOcc, +mkDataConWrapperOcc, mkWorkerOcc, mkDefaultMethodOcc, + mkGenDefMethodOcc, mkDerivedTyConOcc, mkClassDataConOcc, mkDictOcc, mkIPOcc, mkSpecOcc, mkForeignExportOcc, mkGenOcc1, mkGenOcc2, mkGenD, mkGenR, mkGen1R, mkGenRCo, mkDataTOcc, mkDataCOcc, mkDataConWorkerOcc, mkNewTyCoOcc, diff --git a/compiler/cmm/CLabel.hs b/compiler/cmm/CLabel.hs index 1bcb695020e2..1b86f3d6b41c 100644 --- a/compiler/cmm/CLabel.hs +++ b/compiler/cmm/CLabel.hs @@ -1030,9 +1030,9 @@ pprCLbl (RtsLabel (RtsApFast str)) = ftext str <> ptext (sLit "_fast") pprCLbl (RtsLabel (RtsSelectorInfoTable upd_reqd offset)) = hcat [ptext (sLit "stg_sel_"), text (show offset), - ptext (if upd_reqd - then (sLit "_upd_info") - else (sLit "_noupd_info")) + ptext (if upd_reqd + then (sLit "_upd_info") + else (sLit "_noupd_info")) ] pprCLbl (RtsLabel (RtsSelectorEntry upd_reqd offset)) diff --git a/compiler/cmm/CmmParse.y b/compiler/cmm/CmmParse.y index e9914bd13450..8c36deafbb3a 100644 --- a/compiler/cmm/CmmParse.y +++ b/compiler/cmm/CmmParse.y @@ -1005,8 +1005,7 @@ stmtMacros = listToUFM [ tickyAllocPAP goods slop ), ( fsLit "TICK_ALLOC_UP_THK", \[goods,slop] -> tickyAllocThunk goods slop ), - ( fsLit "UPD_BH_UPDATABLE", \[reg] -> emitBlackHoleCode False reg ), - ( fsLit "UPD_BH_SINGLE_ENTRY", \[reg] -> emitBlackHoleCode True reg ) + ( fsLit "UPD_BH_UPDATABLE", \[reg] -> emitBlackHoleCode reg ) ] emitPushUpdateFrame :: CmmExpr -> CmmExpr -> FCode () diff --git a/compiler/cmm/CmmUtils.hs b/compiler/cmm/CmmUtils.hs index 78e5562d81bd..a5acffb2f7fc 100644 --- a/compiler/cmm/CmmUtils.hs +++ b/compiler/cmm/CmmUtils.hs @@ -424,7 +424,7 @@ ofBlockMap entry bodyMap = CmmGraph {g_entry=entry, g_graph=GMany NothingO bodyM insertBlock :: CmmBlock -> BlockEnv CmmBlock -> BlockEnv CmmBlock insertBlock block map = - ASSERT (isNothing $ mapLookup id map) + ASSERT(isNothing $ mapLookup id map) mapInsert id block map where id = entryLabel block diff --git a/compiler/codeGen/StgCmmBind.hs b/compiler/codeGen/StgCmmBind.hs index 8d0a35ff4fa4..eb1bfe93e716 100644 --- a/compiler/codeGen/StgCmmBind.hs +++ b/compiler/codeGen/StgCmmBind.hs @@ -130,8 +130,7 @@ cgBind (StgNonRec name rhs) = do { (info, fcode) <- cgRhs name rhs ; addBindC (cg_id info) info ; init <- fcode - ; emit init - } + ; emit init } -- init cannot be used in body, so slightly better to sink it eagerly cgBind (StgRec pairs) @@ -209,9 +208,34 @@ cgRhs id (StgRhsCon cc con args) buildDynCon id True cc con args cgRhs name (StgRhsClosure cc bi fvs upd_flag _srt args body) + | null fvs -- See Note [Nested constant closures] + = do { (info, fcode) <- cgTopRhsClosure Recursive name cc bi upd_flag args body + ; return (info, fcode >> return mkNop) } + | otherwise = do dflags <- getDynFlags mkRhsClosure dflags name cc bi (nonVoidIds fvs) upd_flag args body +{- Note [Nested constant closures] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +If we have + f x = let funny = not True + in ... +then 'funny' is a nested closure (compiled with cgRhs) that has no free vars. +This does not happen often, because let-floating takes them all to top +level; but it CAN happen. (Reason: let-floating may make a function f smaller +so it can be inlined, so now (f True) may generate a local no-fv closure. +This actually happened during bootsrapping GHC itself, with f=mkRdrFunBind +in TcGenDeriv.) 
+ +If we have one of these things, AND they allocate, the heap check will +refer to the static funny_closure; but there isn't one! (Why does the +heap check refer to the static closure? Because nodeMustPointToIt is +False, which is fair enough.) + +Simple solution: compile the RHS as if it was top level. Then +everything works. A minor benefit is eliminating the allocation code +too. -} + ------------------------------------------------------------------------ -- Non-constructor right hand sides ------------------------------------------------------------------------ @@ -399,7 +423,7 @@ cgRhsStdThunk bndr lf_info payload -- ; (use_cc, blame_cc) <- chooseDynCostCentres cc [{- no args-}] body ; let use_cc = curCCS; blame_cc = curCCS - ; tickyEnterStdThunk + ; tickyEnterStdThunk closure_info -- BUILD THE OBJECT ; let info_tbl = mkCmmInfo closure_info @@ -547,8 +571,9 @@ thunkCode cl_info fv_details _cc node arity body ; entryHeapCheck cl_info node' arity [] $ do { -- Overwrite with black hole if necessary -- but *after* the heap-overflow check + ; tickyEnterThunk cl_info ; when (blackHoleOnEntry cl_info && node_points) - (blackHoleIt cl_info node) + (blackHoleIt node) -- Push update frame ; setupUpdate cl_info node $ @@ -556,7 +581,7 @@ thunkCode cl_info fv_details _cc node arity body -- that cc of enclosing scope will be recorded -- in update frame CAF/DICT functions will be -- subsumed by this enclosing cc - do { tickyEnterThunk + do { tickyEnterThunk cl_info ; enterCostCentreThunk (CmmReg nodeReg) ; let lf_info = closureLFInfo cl_info ; fv_bindings <- mapM bind_fv fv_details @@ -568,14 +593,14 @@ thunkCode cl_info fv_details _cc node arity body -- Update and black-hole wrappers ------------------------------------------------------------------------ -blackHoleIt :: ClosureInfo -> LocalReg -> FCode () +blackHoleIt :: LocalReg -> FCode () -- Only called for closures with no args -- Node points to the closure -blackHoleIt closure_info node - = emitBlackHoleCode (closureSingleEntry closure_info) (CmmReg (CmmLocal node)) +blackHoleIt node_reg + = emitBlackHoleCode (CmmReg (CmmLocal node_reg)) -emitBlackHoleCode :: Bool -> CmmExpr -> FCode () -emitBlackHoleCode is_single_entry node = do +emitBlackHoleCode :: CmmExpr -> FCode () +emitBlackHoleCode node = do dflags <- getDynFlags -- Eager blackholing is normally disabled, but can be turned on with @@ -603,7 +628,6 @@ emitBlackHoleCode is_single_entry node = do -- work with profiling.
when eager_blackholing $ do - tickyBlackHole (not is_single_entry) emitStore (cmmOffsetW dflags node (fixedHdrSize dflags)) (CmmReg (CmmGlobal CurrentTSO)) emitPrimCall [] MO_WriteBarrier [] @@ -614,7 +638,7 @@ setupUpdate :: ClosureInfo -> LocalReg -> FCode () -> FCode () -- so that the cost centre in the original closure can still be -- extracted by a subsequent enterCostCentre setupUpdate closure_info node body - | closureReEntrant closure_info + | not (lfUpdatable (closureLFInfo closure_info)) = body | not (isStaticClosure closure_info) diff --git a/compiler/codeGen/StgCmmClosure.hs b/compiler/codeGen/StgCmmClosure.hs index a057484d3929..04749e9da1a4 100644 --- a/compiler/codeGen/StgCmmClosure.hs +++ b/compiler/codeGen/StgCmmClosure.hs @@ -519,7 +519,7 @@ getCallMethod _ _name _ (LFUnknown True) _n_args = SlowCall -- might be a function getCallMethod _ name _ (LFUnknown False) n_args - = ASSERT2 ( n_args == 0, ppr name <+> ppr n_args ) + = ASSERT2( n_args == 0, ppr name <+> ppr n_args ) EnterIt -- Not a function getCallMethod _ _name _ LFBlackHole _n_args diff --git a/compiler/codeGen/StgCmmMonad.hs b/compiler/codeGen/StgCmmMonad.hs index 1f3d5c488663..3f361e3f518e 100644 --- a/compiler/codeGen/StgCmmMonad.hs +++ b/compiler/codeGen/StgCmmMonad.hs @@ -76,7 +76,6 @@ import Outputable import Control.Monad import Data.List import Prelude hiding( sequence, succ ) -import qualified Prelude( sequence ) infixr 9 `thenC` -- Right-associative! infixr 9 `thenFC` @@ -553,8 +552,8 @@ forkStatics :: FCode a -> FCode a -- The Abstract~C returned is attached to the current state, but the -- bindings and usage information is otherwise unchanged. forkStatics body_code - = do { dflags <- getDynFlags - ; info <- getInfoDown + = do { dflags <- getDynFlags + ; info <- getInfoDown ; us <- newUniqSupply ; state <- getState ; let rhs_info_down = info { cgd_statics = cgs_binds state diff --git a/compiler/codeGen/StgCmmTicky.hs b/compiler/codeGen/StgCmmTicky.hs index 004c940d5407..6c606476c416 100644 --- a/compiler/codeGen/StgCmmTicky.hs +++ b/compiler/codeGen/StgCmmTicky.hs @@ -133,7 +133,7 @@ import TyCon import Data.Maybe import qualified Data.Char -import Control.Monad ( when ) +import Control.Monad ( unless, when ) ----------------------------------------------------------------------------- -- @@ -238,15 +238,24 @@ tickyEnterDynCon = ifTicky $ bumpTickyCounter (fsLit "ENT_DYN_CON_ctr") tickyEnterStaticCon = ifTicky $ bumpTickyCounter (fsLit "ENT_STATIC_CON_ctr") tickyEnterViaNode = ifTicky $ bumpTickyCounter (fsLit "ENT_VIA_NODE_ctr") -tickyEnterThunk :: FCode () -tickyEnterThunk = ifTicky $ do - bumpTickyCounter (fsLit "ENT_DYN_THK_ctr") - ifTickyDynThunk $ do - ticky_ctr_lbl <- getTickyCtrLabel - registerTickyCtrAtEntryDyn ticky_ctr_lbl - bumpTickyEntryCount ticky_ctr_lbl +tickyEnterThunk :: ClosureInfo -> FCode () +tickyEnterThunk cl_info + = ifTicky $ do + { bumpTickyCounter ctr + ; unless static $ do + ticky_ctr_lbl <- getTickyCtrLabel + registerTickyCtrAtEntryDyn ticky_ctr_lbl + bumpTickyEntryCount ticky_ctr_lbl } + where + updatable = closureSingleEntry cl_info + static = isStaticClosure cl_info + + ctr | static = if updatable then fsLit "ENT_STATIC_THK_SINGLE_ctr" + else fsLit "ENT_STATIC_THK_MANY_ctr" + | otherwise = if updatable then fsLit "ENT_DYN_THK_SINGLE_ctr" + else fsLit "ENT_DYN_THK_MANY_ctr" -tickyEnterStdThunk :: FCode () +tickyEnterStdThunk :: ClosureInfo -> FCode () tickyEnterStdThunk = tickyEnterThunk tickyBlackHole :: Bool{-updatable-} -> FCode () diff --git 
a/compiler/coreSyn/CoreArity.lhs b/compiler/coreSyn/CoreArity.lhs index 6b9e3e8d9fe7..7bf15d8216d5 100644 --- a/compiler/coreSyn/CoreArity.lhs +++ b/compiler/coreSyn/CoreArity.lhs @@ -30,7 +30,7 @@ import Var import VarEnv import Id import Type -import TyCon ( isRecursiveTyCon, isClassTyCon ) +import TyCon ( initRecTc, checkRecTc ) import Coercion import BasicTypes import Unique @@ -88,7 +88,7 @@ exprArity e = go e go (Lam x e) | isId x = go e + 1 | otherwise = go e go (Tick t e) | not (tickishIsCode t) = go e - go (Cast e co) = go e `min` length (typeArity (pSnd (coercionKind co))) + go (Cast e co) = trim_arity (go e) (pSnd (coercionKind co)) -- Note [exprArity invariant] go (App e (Type _)) = go e go (App f a) | exprIsTrivial a = (go f - 1) `max` 0 @@ -97,6 +97,8 @@ exprArity e = go e go _ = 0 + trim_arity :: Arity -> Type -> Arity + trim_arity arity ty = arity `min` length (typeArity ty) --------------- typeArity :: Type -> [OneShot] @@ -104,24 +106,32 @@ typeArity :: Type -> [OneShot] -- We look through foralls, and newtypes -- See Note [exprArity invariant] typeArity ty - | Just (_, ty') <- splitForAllTy_maybe ty - = typeArity ty' - - | Just (arg,res) <- splitFunTy_maybe ty - = isStateHackType arg : typeArity res - - | Just (tc,tys) <- splitTyConApp_maybe ty - , Just (ty', _) <- instNewTyCon_maybe tc tys - , not (isRecursiveTyCon tc) - , not (isClassTyCon tc) -- Do not eta-expand through newtype classes - -- See Note [Newtype classes and eta expansion] - = typeArity ty' + = go initRecTc ty + where + go rec_nts ty + | Just (_, ty') <- splitForAllTy_maybe ty + = go rec_nts ty' + + | Just (arg,res) <- splitFunTy_maybe ty + = isStateHackType arg : go rec_nts res + + | Just (tc,tys) <- splitTyConApp_maybe ty + , Just (ty', _) <- instNewTyCon_maybe tc tys + , Just rec_nts' <- checkRecTc rec_nts tc -- See Note [Expanding newtypes] -- in TyCon +-- , not (isClassTyCon tc) -- Do not eta-expand through newtype classes +-- -- See Note [Newtype classes and eta expansion] +-- (no longer required) + = go rec_nts' ty' -- Important to look through non-recursive newtypes, so that, eg -- (f x) where f has arity 2, f :: Int -> IO () -- Here we want to get arity 1 for the result! + -- + -- AND through a layer of recursive newtypes + -- e.g. newtype Stream m a b = Stream (m (Either b (a, Stream m a b))) - | otherwise - = [] + | otherwise + = [] --------------- exprBotStrictness_maybe :: CoreExpr -> Maybe (Arity, StrictSig) @@ -168,6 +178,11 @@ in exprArity. That is a less local change, so I'm going to leave it for today! Note [Newtype classes and eta expansion] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + NB: this nasty special case is no longer required, because + for newtype classes we don't use the class-op rule mechanism + at all. See Note [Single-method classes] in TcInstDcls. SLPJ May 2013 + +-------- Old out of date comments, just for interest ----------- We have to be careful when eta-expanding through newtypes. In general it's a good idea, but annoyingly it interacts badly with the class-op rule mechanism. Consider @@ -207,6 +222,7 @@ exprIsConApp_maybe won't hold of the argument to op. I considered trying to *make* it hold, but it's tricky and I gave up. The test simplCore/should_compile/T3722 is an excellent example. +-------- End of old out of date comments, just for interest ----------- Note [exprArity for applications] @@ -542,7 +558,7 @@ PAPSs f = g d ==> f = \x. g d x because that might in turn make g inline (if it has an inline pragma), which we might not want.
After all, INLINE pragmas say "inline only -when saturate" so we don't want to be too gung-ho about saturating! +when saturated" so we don't want to be too gung-ho about saturating! \begin{code} arityLam :: Id -> ArityType -> ArityType @@ -726,7 +742,7 @@ The biggest reason for doing this is for cases like True -> \y -> e1 False -> \y -> e2 -Here we want to get the lambdas together. A good exmaple is the nofib +Here we want to get the lambdas together. A good example is the nofib program fibheaps, which gets 25% more allocation if you don't do this eta-expansion. diff --git a/compiler/coreSyn/CoreFVs.lhs b/compiler/coreSyn/CoreFVs.lhs index 2a11723fa9f6..636c049c4254 100644 --- a/compiler/coreSyn/CoreFVs.lhs +++ b/compiler/coreSyn/CoreFVs.lhs @@ -104,8 +104,17 @@ type InterestingVarFun = Var -> Bool \begin{code} type FV = InterestingVarFun - -> VarSet -- In scope + -> VarSet -- Locally bound -> VarSet -- Free vars + -- Return the vars that are both (a) interesting + -- and (b) not locally bound + -- See function keep_it + +keep_it :: InterestingVarFun -> VarSet -> Var -> Bool +keep_it fv_cand in_scope var + | var `elemVarSet` in_scope = False + | fv_cand var = True + | otherwise = False union :: FV -> FV -> FV union fv1 fv2 fv_cand in_scope = fv1 fv_cand in_scope `unionVarSet` fv2 fv_cand in_scope @@ -152,13 +161,6 @@ someVars :: VarSet -> FV someVars vars fv_cand in_scope = filterVarSet (keep_it fv_cand in_scope) vars -keep_it :: InterestingVarFun -> VarSet -> Var -> Bool -keep_it fv_cand in_scope var - | var `elemVarSet` in_scope = False - | fv_cand var = True - | otherwise = False - - addBndr :: CoreBndr -> FV -> FV addBndr bndr fv fv_cand in_scope = someVars (varTypeTyVars bndr) fv_cand in_scope @@ -434,15 +436,18 @@ idUnfoldingVars :: Id -> VarSet -- and we'll get exponential behaviour if we look at both unf and rhs! 
-- But do look at the *real* unfolding, even for loop breakers, else -- we might get out-of-scope variables -idUnfoldingVars id = stableUnfoldingVars isLocalId (realIdUnfolding id) `orElse` emptyVarSet +idUnfoldingVars id = stableUnfoldingVars (realIdUnfolding id) `orElse` emptyVarSet -stableUnfoldingVars :: InterestingVarFun -> Unfolding -> Maybe VarSet -stableUnfoldingVars fv_cand unf +stableUnfoldingVars :: Unfolding -> Maybe VarSet +stableUnfoldingVars unf = case unf of CoreUnfolding { uf_tmpl = rhs, uf_src = src } - | isStableSource src -> Just (exprSomeFreeVars fv_cand rhs) - DFunUnfolding _ _ args -> Just (exprsSomeFreeVars fv_cand (dfunArgExprs args)) - _other -> Nothing + | isStableSource src + -> Just (exprFreeVars rhs) + DFunUnfolding { df_bndrs = bndrs, df_args = args } + -> Just (exprs_fvs args isLocalVar (mkVarSet bndrs)) + -- DFuns are top level, so no fvs from types of bndrs + _other -> Nothing \end{code} diff --git a/compiler/coreSyn/CoreLint.lhs b/compiler/coreSyn/CoreLint.lhs index 32de2a44f02a..f9256e18ad76 100644 --- a/compiler/coreSyn/CoreLint.lhs +++ b/compiler/coreSyn/CoreLint.lhs @@ -199,21 +199,25 @@ lintSingleBinding top_lvl_flag rec_flag (binder,rhs) do { ty <- lintCoreExpr rhs ; lintBinder binder -- Check match to RHS type ; binder_ty <- applySubstTy binder_ty - ; checkTys binder_ty ty (mkRhsMsg binder ty) + ; checkTys binder_ty ty (mkRhsMsg binder (ptext (sLit "RHS")) ty) + -- Check (not isUnLiftedType) (also checks for bogus unboxed tuples) ; checkL (not (isUnLiftedType binder_ty) || (isNonRec rec_flag && exprOkForSpeculation rhs)) (mkRhsPrimMsg binder rhs) + -- Check that if the binder is top-level or recursive, it's not demanded ; checkL (not (isStrictId binder) || (isNonRec rec_flag && not (isTopLevel top_lvl_flag))) (mkStrictMsg binder) + -- Check that if the binder is local, it is not marked as exported ; checkL (not (isExportedId binder) || isTopLevel top_lvl_flag) (mkNonTopExportedMsg binder) -- Check that if the binder is local, it does not have an external name ; checkL (not (isExternalName (Var.varName binder)) || isTopLevel top_lvl_flag) (mkNonTopExternalNameMsg binder) + -- Check whether binder's specialisations contain any out-of-scope variables ; mapM_ (checkBndrIdInScope binder) bndr_vars @@ -225,7 +229,9 @@ lintSingleBinding top_lvl_flag rec_flag (binder,rhs) -- already happened) ; checkL (case dmdTy of StrictSig dmd_ty -> idArity binder >= dmdTypeDepth dmd_ty || exprIsTrivial rhs) - (mkArityMsg binder) } + (mkArityMsg binder) + + ; lintIdUnfolding binder binder_ty (idUnfolding binder) } -- We should check the unfolding, if any, but this is tricky because -- the unfolding is a SimplifiableCoreExpr. Give up for now. 
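For reference, here is a small, self-contained sketch of the convention that the CoreFVs.lhs hunk above now documents on the FV type: a free-variable computation is a function of the "interesting" predicate and the set of locally bound variables, and combinators thread both arguments through. Plain Strings and Data.Set stand in for Var and VarSet, and the names used here (FVFun, oneVar, unionFV, underBinder) are illustrative, not GHC's.
\begin{code}
import qualified Data.Set as S

type Var' = String

type FVFun = (Var' -> Bool)  -- which variables are "interesting"
          -> S.Set Var'      -- locally bound variables
          -> S.Set Var'      -- result: interesting and not locally bound

-- The analogue of keep_it: keep a variable only if it is interesting
-- and not bound by a binder we have walked under.
oneVar :: Var' -> FVFun
oneVar v interesting bound
  | v `S.member` bound = S.empty
  | interesting v      = S.singleton v
  | otherwise          = S.empty

-- The analogue of union: run both computations with the same arguments.
unionFV :: FVFun -> FVFun -> FVFun
unionFV f g interesting bound =
  f interesting bound `S.union` g interesting bound

-- The analogue of addBndr: extend the locally-bound set for the body.
underBinder :: Var' -> FVFun -> FVFun
underBinder b body interesting bound = body interesting (S.insert b bound)
\end{code}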
@@ -238,6 +244,14 @@ lintSingleBinding top_lvl_flag rec_flag (binder,rhs) -- See Note [GHC Formalism] lintBinder var | isId var = lintIdBndr var $ \_ -> (return ()) | otherwise = return () + +lintIdUnfolding :: Id -> Type -> Unfolding -> LintM () +lintIdUnfolding bndr bndr_ty (CoreUnfolding { uf_tmpl = rhs, uf_src = src }) + | isStableSource src + = do { ty <- lintCoreExpr rhs + ; checkTys bndr_ty ty (mkRhsMsg bndr (ptext (sLit "unfolding")) ty) } +lintIdUnfolding _ _ _ + = return () -- We could check more \end{code} %************************************************************************ @@ -1263,10 +1277,10 @@ mkTyAppMsg ty arg_ty hang (ptext (sLit "Arg type:")) 4 (ppr arg_ty <+> dcolon <+> ppr (typeKind arg_ty))] -mkRhsMsg :: Id -> Type -> MsgDoc -mkRhsMsg binder ty +mkRhsMsg :: Id -> SDoc -> Type -> MsgDoc +mkRhsMsg binder what ty = vcat - [hsep [ptext (sLit "The type of this binder doesn't match the type of its RHS:"), + [hsep [ptext (sLit "The type of this binder doesn't match the type of its") <+> what <> colon, ppr binder], hsep [ptext (sLit "Binder's type:"), ppr (idType binder)], hsep [ptext (sLit "Rhs type:"), ppr ty]] diff --git a/compiler/coreSyn/CorePrep.lhs b/compiler/coreSyn/CorePrep.lhs index 3d642bdd654c..d87fdfc1971a 100644 --- a/compiler/coreSyn/CorePrep.lhs +++ b/compiler/coreSyn/CorePrep.lhs @@ -345,12 +345,13 @@ cpeBind :: TopLevelFlag -> CorePrepEnv -> CoreBind -> UniqSM (CorePrepEnv, Floats) cpeBind top_lvl env (NonRec bndr rhs) = do { (_, bndr1) <- cpCloneBndr env bndr - ; let is_strict = isStrictDmd (idDemandInfo bndr) + ; let dmd = idDemandInfo bndr is_unlifted = isUnLiftedType (idType bndr) ; (floats, bndr2, rhs2) <- cpePair top_lvl NonRecursive - (is_strict || is_unlifted) + dmd + is_unlifted env bndr1 rhs - ; let new_float = mkFloat is_strict is_unlifted bndr2 rhs2 + ; let new_float = mkFloat dmd is_unlifted bndr2 rhs2 -- We want bndr'' in the envt, because it records -- the evaluated-ness of the binder @@ -360,7 +361,7 @@ cpeBind top_lvl env (NonRec bndr rhs) cpeBind top_lvl env (Rec pairs) = do { let (bndrs,rhss) = unzip pairs ; (env', bndrs1) <- cpCloneBndrs env (map fst pairs) - ; stuff <- zipWithM (cpePair top_lvl Recursive False env') bndrs1 rhss + ; stuff <- zipWithM (cpePair top_lvl Recursive topDmd False env') bndrs1 rhss ; let (floats_s, bndrs2, rhss2) = unzip3 stuff all_pairs = foldrOL add_float (bndrs2 `zip` rhss2) @@ -375,11 +376,11 @@ cpeBind top_lvl env (Rec pairs) add_float b _ = pprPanic "cpeBind" (ppr b) --------------- -cpePair :: TopLevelFlag -> RecFlag -> RhsDemand +cpePair :: TopLevelFlag -> RecFlag -> Demand -> Bool -> CorePrepEnv -> Id -> CoreExpr -> UniqSM (Floats, Id, CpeRhs) -- Used for all bindings -cpePair top_lvl is_rec is_strict_or_unlifted env bndr rhs +cpePair top_lvl is_rec dmd is_unlifted env bndr rhs = do { (floats1, rhs1) <- cpeRhsE env rhs -- See if we are allowed to float this stuff out of the RHS @@ -392,7 +393,7 @@ cpePair top_lvl is_rec is_strict_or_unlifted env bndr rhs else WARN(True, text "CorePrep: silly extra arguments:" <+> ppr bndr) -- Note [Silly extra arguments] (do { v <- newVar (idType bndr) - ; let float = mkFloat False False v rhs2 + ; let float = mkFloat topDmd False v rhs2 ; return ( addFloat floats2 float , cpeEtaExpand arity (Var v)) }) @@ -406,6 +407,8 @@ cpePair top_lvl is_rec is_strict_or_unlifted env bndr rhs ; return (floats3, bndr', rhs') } where + is_strict_or_unlifted = (isStrictDmd dmd) || is_unlifted + platform = targetPlatform (cpe_dynFlags env) arity = idArity bndr -- We must match this 
arity @@ -650,9 +653,8 @@ cpeApp env expr [] -> (topDmd, []) (arg_ty, res_ty) = expectJust "cpeBody:collect_args" $ splitFunTy_maybe fun_ty - is_strict = isStrictDmd ss1 - ; (fs, arg') <- cpeArg env is_strict arg arg_ty + ; (fs, arg') <- cpeArg env ss1 arg arg_ty ; return (App fun' arg', hd, res_ty, fs `appendFloats` floats, ss_rest) } collect_args (Var v) depth @@ -682,8 +684,8 @@ cpeApp env expr -- N-variable fun, better let-bind it collect_args fun depth - = do { (fun_floats, fun') <- cpeArg env True fun ty - -- The True says that it's sure to be evaluated, + = do { (fun_floats, fun') <- cpeArg env evalDmd fun ty + -- The evalDmd says that it's sure to be evaluated, -- so we'll end up case-binding it ; return (fun', (fun', depth), ty, fun_floats, []) } where @@ -694,9 +696,9 @@ cpeApp env expr -- --------------------------------------------------------------------------- -- This is where we arrange that a non-trivial argument is let-bound -cpeArg :: CorePrepEnv -> RhsDemand +cpeArg :: CorePrepEnv -> Demand -> CoreArg -> Type -> UniqSM (Floats, CpeTriv) -cpeArg env is_strict arg arg_ty +cpeArg env dmd arg arg_ty = do { (floats1, arg1) <- cpeRhsE env arg -- arg1 can be a lambda ; (floats2, arg2) <- if want_float floats1 arg1 then return (floats1, arg1) @@ -710,11 +712,12 @@ cpeArg env is_strict arg arg_ty else do { v <- newVar arg_ty ; let arg3 = cpeEtaExpand (exprArity arg2) arg2 - arg_float = mkFloat is_strict is_unlifted v arg3 + arg_float = mkFloat dmd is_unlifted v arg3 ; return (addFloat floats2 arg_float, varToCoreExpr v) } } where is_unlifted = isUnLiftedType arg_ty - want_float = wantFloatNested NonRecursive (is_strict || is_unlifted) + is_strict = isStrictDmd dmd + want_float = wantFloatNested NonRecursive (is_strict || is_unlifted) \end{code} Note [Floating unlifted arguments] @@ -909,20 +912,16 @@ tryEtaReducePrep _ _ = Nothing \end{code} --- ----------------------------------------------------------------------------- --- Demands --- ----------------------------------------------------------------------------- - -\begin{code} -type RhsDemand = Bool -- True => used strictly; hence not top-level, non-recursive -\end{code} - %************************************************************************ %* * Floats %* * %************************************************************************ +Note [Pin demand info on floats] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +We pin demand info on floated lets so that we can see the one-shot thunks. + \begin{code} data FloatingBind = FloatLet CoreBind -- Rhs of bindings are CpeRhss @@ -957,12 +956,16 @@ data OkToSpec -- ok-to-speculate unlifted bindings | NotOkToSpec -- Some not-ok-to-speculate unlifted bindings -mkFloat :: Bool -> Bool -> Id -> CpeRhs -> FloatingBind -mkFloat is_strict is_unlifted bndr rhs +mkFloat :: Demand -> Bool -> Id -> CpeRhs -> FloatingBind +mkFloat dmd is_unlifted bndr rhs | use_case = FloatCase bndr rhs (exprOkForSpeculation rhs) - | otherwise = FloatLet (NonRec bndr rhs) + | is_hnf = FloatLet (NonRec bndr rhs) + | otherwise = FloatLet (NonRec (setIdDemandInfo bndr dmd) rhs) + -- See Note [Pin demand info on floats] where - use_case = is_unlifted || is_strict && not (exprIsHNF rhs) + is_hnf = exprIsHNF rhs + is_strict = isStrictDmd dmd + use_case = is_unlifted || is_strict && not is_hnf -- Don't make a case for a value binding, -- even if it's strict. Otherwise we get -- case (\x -> e) of ...! 
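The CorePrep.lhs changes above replace the old is_strict Boolean with the binder's Demand, and Note [Pin demand info on floats] explains why: the demand is pinned on floated lets so that one-shot thunks remain visible to later passes. Below is a simplified sketch of the mkFloat decision only, with toy types standing in for Demand, Id and CpeRhs; ToyDemand, ToyFloat and mkToyFloat are assumptions of this illustration, not CorePrep's real definitions.
\begin{code}
data ToyDemand = Lazy | Strict
  deriving (Eq, Show)

data ToyFloat
  = AsCase String                   -- bind with a case: evaluated eagerly
  | AsLet  String (Maybe ToyDemand) -- bind with a let, optionally pinning
                                    -- the demand on the binder
  deriving Show

mkToyFloat :: ToyDemand  -- demand on the binder
           -> Bool       -- is the binding unlifted?
           -> Bool       -- is the right-hand side already a value (HNF)?
           -> String     -- binder name
           -> ToyFloat
mkToyFloat dmd isUnlifted isHnf bndr
  | useCase   = AsCase bndr
  | isHnf     = AsLet bndr Nothing      -- already a value: nothing to pin
  | otherwise = AsLet bndr (Just dmd)   -- pin the demand on the floated let
  where
    isStrict = dmd == Strict
    useCase  = isUnlifted || (isStrict && not isHnf)
\end{code}
As in the real mkFloat, a strictly demanded non-value becomes a case float, while the remaining lazy, not-yet-evaluated lets carry the pinned demand.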
diff --git a/compiler/coreSyn/CoreSubst.lhs b/compiler/coreSyn/CoreSubst.lhs index 95cb7f8fbb8d..bc9c767d29a1 100644 --- a/compiler/coreSyn/CoreSubst.lhs +++ b/compiler/coreSyn/CoreSubst.lhs @@ -59,7 +59,6 @@ import Type hiding ( substTy, extendTvSubst, extendTvSubstList , isInScope, substTyVarBndr, cloneTyVarBndr ) import Coercion hiding ( substTy, substCo, extendTvSubst, substTyVarBndr, substCoVarBndr ) -import TcType ( tcSplitDFunTy ) import TyCon ( tyConArity ) import DataCon import PrelNames ( eqBoxDataConKey ) @@ -78,7 +77,6 @@ import Maybes import ErrUtils import DynFlags import BasicTypes ( isAlwaysActive ) -import ListSetOps import Util import Pair import Outputable @@ -656,10 +654,11 @@ substUnfoldingSC subst unf -- Short-cut version | isEmptySubst subst = unf | otherwise = substUnfolding subst unf -substUnfolding subst (DFunUnfolding ar con args) - = DFunUnfolding ar con (map subst_arg args) +substUnfolding subst df@(DFunUnfolding { df_bndrs = bndrs, df_args = args }) + = df { df_bndrs = bndrs', df_args = args' } where - subst_arg = fmap (substExpr (text "dfun-unf") subst) + (subst',bndrs') = substBndrs subst bndrs + args' = map (substExpr (text "subst-unf:dfun") subst') args substUnfolding subst unf@(CoreUnfolding { uf_tmpl = tmpl, uf_src = src }) -- Retain an InlineRule! @@ -923,6 +922,8 @@ simple_opt_expr :: Subst -> InExpr -> OutExpr simple_opt_expr subst expr = go expr where + in_scope_env = (substInScope subst, simpleUnfoldingFun) + go (Var v) = lookupIdSubst (text "simpleOptExpr") subst v go (App e1 e2) = simple_app subst e1 [go e2] go (Type ty) = Type (substTy subst ty) @@ -942,7 +943,7 @@ simple_opt_expr subst expr go (Case e b ty as) -- See Note [Optimise coercion boxes agressively] | isDeadBinder b - , Just (con, _tys, es) <- expr_is_con_app e' + , Just (con, _tys, es) <- exprIsConApp_maybe in_scope_env e' , Just (altcon, bs, rhs) <- findAlt (DataAlt con) as = case altcon of DEFAULT -> go rhs @@ -1109,8 +1110,10 @@ add_info subst old_bndr new_bndr | otherwise = maybeModifyIdInfo mb_new_info new_bndr where mb_new_info = substIdInfo subst new_bndr (idInfo old_bndr) -expr_is_con_app :: OutExpr -> Maybe (DataCon, [Type], [OutExpr]) -expr_is_con_app = exprIsConApp_maybe (\id -> if isAlwaysActive (idInlineActivation id) then idUnfolding id else noUnfolding) +simpleUnfoldingFun :: IdUnfoldingFun +simpleUnfoldingFun id + | isAlwaysActive (idInlineActivation id) = idUnfolding id + | otherwise = noUnfolding \end{code} Note [Inline prag in simplOpt] @@ -1158,12 +1161,10 @@ data ConCont = CC [CoreExpr] Coercion -- | Returns @Just (dc, [t1..tk], [x1..xn])@ if the argument expression is -- a *saturated* constructor application of the form @dc t1..tk x1 .. 
xn@, -- where t1..tk are the *universally-qantified* type args of 'dc' -exprIsConApp_maybe :: IdUnfoldingFun -> CoreExpr -> Maybe (DataCon, [Type], [CoreExpr]) -exprIsConApp_maybe id_unf expr +exprIsConApp_maybe :: InScopeEnv -> CoreExpr -> Maybe (DataCon, [Type], [CoreExpr]) +exprIsConApp_maybe (in_scope, id_unf) expr = go (Left in_scope) expr (CC [] (mkReflCo (exprType expr))) where - in_scope = mkInScopeSet (exprFreeVars expr) - go :: Either InScopeSet Subst -> CoreExpr -> ConCont -> Maybe (DataCon, [Type], [CoreExpr]) @@ -1184,17 +1185,13 @@ exprIsConApp_maybe id_unf expr go (Left in_scope) (Var fun) cont@(CC args co) | Just con <- isDataConWorkId_maybe fun , count isValArg args == idArity fun - , let (univ_ty_args, rest_args) = splitAtList (dataConUnivTyVars con) args - = dealWithCoercion co (con, stripTypeArgs univ_ty_args, rest_args) + = dealWithCoercion co con args -- Look through dictionary functions; see Note [Unfolding DFuns] - | DFunUnfolding dfun_nargs con ops <- unfolding - , length args == dfun_nargs -- See Note [DFun arity check] - , let (dfun_tvs, _theta, _cls, dfun_res_tys) = tcSplitDFunTy (idType fun) - subst = zipOpenTvSubst dfun_tvs (stripTypeArgs (takeList dfun_tvs args)) - mk_arg (DFunPolyArg e) = mkApps e args - mk_arg (DFunLamArg i) = getNth args i - = dealWithCoercion co (con, substTys subst dfun_res_tys, map mk_arg ops) + | DFunUnfolding { df_bndrs = bndrs, df_con = con, df_args = dfun_args } <- unfolding + , bndrs `equalLength` args -- See Note [DFun arity check] + , let subst = mkOpenSubst in_scope (bndrs `zip` args) + = dealWithCoercion co con (map (substExpr (text "exprIsConApp1") subst) dfun_args) -- Look through unfoldings, but only arity-zero one; -- if arity > 0 we are effectively inlining a function call, @@ -1217,17 +1214,17 @@ exprIsConApp_maybe id_unf expr subst_co (Right s) co = CoreSubst.substCo s co subst_arg (Left {}) e = e - subst_arg (Right s) e = substExpr (text "exprIsConApp") s e + subst_arg (Right s) e = substExpr (text "exprIsConApp2") s e extend (Left in_scope) v e = Right (extendSubst (mkEmptySubst in_scope) v e) extend (Right s) v e = Right (extendSubst s v e) -dealWithCoercion :: Coercion - -> (DataCon, [Type], [CoreExpr]) +dealWithCoercion :: Coercion -> DataCon -> [CoreExpr] -> Maybe (DataCon, [Type], [CoreExpr]) -dealWithCoercion co stuff@(dc, _dc_univ_args, dc_args) +dealWithCoercion co dc dc_args | isReflCo co - = Just stuff + , let (univ_ty_args, rest_args) = splitAtList (dataConUnivTyVars dc) dc_args + = Just (dc, stripTypeArgs univ_ty_args, rest_args) | Pair _from_ty to_ty <- coercionKind co , Just (to_tc, to_tc_arg_tys) <- splitTyConApp_maybe to_ty @@ -1250,7 +1247,8 @@ dealWithCoercion co stuff@(dc, _dc_univ_args, dc_args) dc_ex_tyvars = dataConExTyVars dc arg_tys = dataConRepArgTys dc - (ex_args, val_args) = splitAtList dc_ex_tyvars dc_args + non_univ_args = dropList dc_univ_tyvars dc_args + (ex_args, val_args) = splitAtList dc_ex_tyvars non_univ_args -- Make the "theta" from Fig 3 of the paper gammas = decomposeCo tc_arity co @@ -1263,10 +1261,11 @@ dealWithCoercion co stuff@(dc, _dc_univ_args, dc_args) cast_arg arg_ty arg = mkCast arg (theta_subst arg_ty) dump_doc = vcat [ppr dc, ppr dc_univ_tyvars, ppr dc_ex_tyvars, - ppr arg_tys, ppr dc_args, ppr _dc_univ_args, + ppr arg_tys, ppr dc_args, ppr ex_args, ppr val_args, ppr co, ppr _from_ty, ppr to_ty, ppr to_tc ] in - ASSERT2( eqType _from_ty (mkTyConApp to_tc _dc_univ_args), dump_doc ) + ASSERT2( eqType _from_ty (mkTyConApp to_tc (stripTypeArgs $ takeList dc_univ_tyvars 
dc_args)) + , dump_doc ) ASSERT2( all isTypeArg ex_args, dump_doc ) ASSERT2( equalLength val_args arg_tys, dump_doc ) Just (dc, to_tc_arg_tys, ex_args ++ new_val_args) @@ -1299,16 +1298,16 @@ type args) matches what the dfun is expecting. This may be *less* than the ordinary arity of the dfun: see Note [DFun unfoldings] in CoreSyn \begin{code} -exprIsLiteral_maybe :: IdUnfoldingFun -> CoreExpr -> Maybe Literal +exprIsLiteral_maybe :: InScopeEnv -> CoreExpr -> Maybe Literal -- Same deal as exprIsConApp_maybe, but much simpler -- Nevertheless we do need to look through unfoldings for -- Integer literals, which are vigorously hoisted to top level -- and not subsequently inlined -exprIsLiteral_maybe id_unf e +exprIsLiteral_maybe env@(_, id_unf) e = case e of Lit l -> Just l - Tick _ e' -> exprIsLiteral_maybe id_unf e' -- dubious? + Tick _ e' -> exprIsLiteral_maybe env e' -- dubious? Var v | Just rhs <- expandUnfolding_maybe (id_unf v) - -> exprIsLiteral_maybe id_unf rhs + -> exprIsLiteral_maybe env rhs _ -> Nothing \end{code} diff --git a/compiler/coreSyn/CoreSyn.lhs b/compiler/coreSyn/CoreSyn.lhs index 6bd25fdeae31..ede3a4052b97 100644 --- a/compiler/coreSyn/CoreSyn.lhs +++ b/compiler/coreSyn/CoreSyn.lhs @@ -49,7 +49,6 @@ module CoreSyn ( -- * Unfolding data types Unfolding(..), UnfoldingGuidance(..), UnfoldingSource(..), - DFunArg(..), dfunArgExprs, -- ** Constructing 'Unfolding's noUnfolding, evaldUnfolding, mkOtherCon, @@ -78,7 +77,7 @@ module CoreSyn ( -- * Core rule data types CoreRule(..), -- CoreSubst, CoreTidy, CoreFVs, PprCore only - RuleName, IdUnfoldingFun, + RuleName, RuleFun, IdUnfoldingFun, InScopeEnv, -- ** Operations on 'CoreRule's seqRules, ruleArity, ruleName, ruleIdName, ruleActivation, @@ -92,6 +91,7 @@ module CoreSyn ( #include "HsVersions.h" import CostCentre +import VarEnv( InScopeSet ) import Var import Type import Coercion @@ -577,13 +577,16 @@ data CoreRule ru_fn :: Name, -- ^ As above ru_nargs :: Int, -- ^ Number of arguments that 'ru_try' consumes, -- if it fires, including type arguments - ru_try :: DynFlags -> Id -> IdUnfoldingFun -> [CoreExpr] -> Maybe CoreExpr + ru_try :: RuleFun -- ^ This function does the rewrite. It given too many -- arguments, it simply discards them; the returned 'CoreExpr' -- is just the rewrite of 'ru_fn' applied to the first 'ru_nargs' args } -- See Note [Extra args in rule matching] in Rules.lhs +type RuleFun = DynFlags -> InScopeEnv -> Id -> [CoreExpr] -> Maybe CoreExpr +type InScopeEnv = (InScopeSet, IdUnfoldingFun) + type IdUnfoldingFun = Id -> Unfolding -- A function that embodies how to unfold an Id if you need -- to do that in the Rule. The reason we need to pass this info in @@ -663,17 +666,15 @@ data Unfolding -- -- Here, @f@ gets an @OtherCon []@ unfolding. - | DFunUnfolding -- The Unfolding of a DFunId + | DFunUnfolding { -- The Unfolding of a DFunId -- See Note [DFun unfoldings] - -- df = /\a1..am. \d1..dn. MkD (op1 a1..am d1..dn) + -- df = /\a1..am. \d1..dn. MkD t1 .. 
tk + -- (op1 a1..am d1..dn) -- (op2 a1..am d1..dn) - - Arity -- Arity = m+n, the *total* number of args - -- (unusually, both type and value) to the dfun - - DataCon -- The dictionary data constructor (possibly a newtype datacon) - - [DFunArg CoreExpr] -- Specification of superclasses and methods, in positional order + df_bndrs :: [Var], -- The bound variables [a1..m],[d1..dn] + df_con :: DataCon, -- The dictionary data constructor (never a newtype datacon) + df_args :: [CoreExpr] -- Args of the data con: types, superclasses and methods, + } -- in positional order | CoreUnfolding { -- An unfolding for an Id with no pragma, -- or perhaps a NOINLINE pragma @@ -710,20 +711,6 @@ data Unfolding -- -- uf_guidance: Tells us about the /size/ of the unfolding template ------------------------------------------------- -data DFunArg e -- Given (df a b d1 d2 d3) - = DFunPolyArg e -- Arg is (e a b d1 d2 d3) - | DFunLamArg Int -- Arg is one of [a,b,d1,d2,d3], zero indexed - deriving( Functor ) - - -- 'e' is often CoreExpr, which are usually variables, but can - -- be trivial expressions instead (e.g. a type application). - -dfunArgExprs :: [DFunArg e] -> [e] -dfunArgExprs [] = [] -dfunArgExprs (DFunPolyArg e : as) = e : dfunArgExprs as -dfunArgExprs (DFunLamArg {} : as) = dfunArgExprs as - ------------------------------------------------ data UnfoldingSource diff --git a/compiler/coreSyn/CoreTidy.lhs b/compiler/coreSyn/CoreTidy.lhs index 77a85c241eec..8d45fbb9b412 100644 --- a/compiler/coreSyn/CoreTidy.lhs +++ b/compiler/coreSyn/CoreTidy.lhs @@ -206,8 +206,11 @@ tidyIdBndr env@(tidy_env, var_env) id ------------ Unfolding -------------- tidyUnfolding :: TidyEnv -> Unfolding -> Unfolding -> Unfolding -tidyUnfolding tidy_env (DFunUnfolding ar con args) _ - = DFunUnfolding ar con (map (fmap (tidyExpr tidy_env)) args) +tidyUnfolding tidy_env df@(DFunUnfolding { df_bndrs = bndrs, df_args = args }) _ + = df { df_bndrs = bndrs', df_args = map (tidyExpr tidy_env') args } + where + (tidy_env', bndrs') = tidyBndrs tidy_env bndrs + tidyUnfolding tidy_env unf@(CoreUnfolding { uf_tmpl = unf_rhs, uf_src = src }) unf_from_rhs diff --git a/compiler/coreSyn/CoreUnfold.lhs b/compiler/coreSyn/CoreUnfold.lhs index 513bb2216671..83a40d299a2a 100644 --- a/compiler/coreSyn/CoreUnfold.lhs +++ b/compiler/coreSyn/CoreUnfold.lhs @@ -48,7 +48,6 @@ module CoreUnfold ( import DynFlags import CoreSyn import PprCore () -- Instances -import TcType ( tcSplitDFunTy ) import OccurAnal ( occurAnalyseExpr ) import CoreSubst hiding( substTy ) import CoreArity ( manifestArity, exprBotStrictness_maybe ) @@ -98,13 +97,9 @@ mkImplicitUnfolding dflags expr mkSimpleUnfolding :: DynFlags -> CoreExpr -> Unfolding mkSimpleUnfolding dflags = mkUnfolding dflags InlineRhs False False -mkDFunUnfolding :: Type -> [DFunArg CoreExpr] -> Unfolding -mkDFunUnfolding dfun_ty ops - = DFunUnfolding dfun_nargs data_con ops - where - (tvs, theta, cls, _) = tcSplitDFunTy dfun_ty - dfun_nargs = length tvs + length theta - data_con = classDataCon cls +mkDFunUnfolding :: [Var] -> DataCon -> [CoreExpr] -> Unfolding +mkDFunUnfolding bndrs con ops + = DFunUnfolding { df_bndrs = bndrs, df_con = con, df_args = ops } mkWwInlineRule :: Id -> CoreExpr -> Arity -> Unfolding mkWwInlineRule id expr arity @@ -952,6 +947,8 @@ tryUnfolding dflags id lone_variable where n_val_args = length arg_infos saturated = n_val_args >= uf_arity + cont_info' | n_val_args > uf_arity = ValAppCtxt + | otherwise = cont_info result | yes_or_no = Just unf_template | otherwise = Nothing @@ -969,12 
+966,11 @@ tryUnfolding dflags id lone_variable some_benefit | not saturated = interesting_args -- Under-saturated -- Note [Unsaturated applications] - | n_val_args > uf_arity = True -- Over-saturated - | otherwise = interesting_args -- Saturated - || interesting_saturated_call + | otherwise = interesting_args -- Saturated or over-saturated + || interesting_call - interesting_saturated_call - = case cont_info of + interesting_call + = case cont_info' of BoringCtxt -> not is_top && uf_arity > 0 -- Note [Nested functions] CaseCtxt -> not (lone_variable && is_wf) -- Note [Lone variables] ArgCtxt {} -> uf_arity > 0 -- Note [Inlining in ArgCtxt] @@ -996,7 +992,7 @@ tryUnfolding dflags id lone_variable discounted_size = size - discount small_enough = discounted_size <= ufUseThreshold dflags discount = computeDiscount dflags uf_arity arg_discounts - res_discount arg_infos cont_info + res_discount arg_infos cont_info' \end{code} Note [RHS of lets] @@ -1116,7 +1112,7 @@ AND then we should not inline it (unless there is some other reason, e.g. is is the sole occurrence). That is what is happening at -the use of 'lone_variable' in 'interesting_saturated_call'. +the use of 'lone_variable' in 'interesting_call'. Why? At least in the case-scrutinee situation, turning let x = (a,b) in case x of y -> ... @@ -1187,9 +1183,9 @@ This kind of thing can occur if you have which Roman did. \begin{code} -computeDiscount :: DynFlags -> Int -> [Int] -> Int -> [ArgSummary] -> CallCtxt +computeDiscount :: DynFlags -> Arity -> [Int] -> Int -> [ArgSummary] -> CallCtxt -> Int -computeDiscount dflags n_vals_wanted arg_discounts res_discount arg_infos cont_info +computeDiscount dflags uf_arity arg_discounts res_discount arg_infos cont_info -- We multiple the raw discounts (args_discount and result_discount) -- ty opt_UnfoldingKeenessFactor because the former have to do with -- *size* whereas the discounts imply that there's some extra @@ -1199,7 +1195,7 @@ computeDiscount dflags n_vals_wanted arg_discounts res_discount arg_infos cont_i = 10 -- Discount of 1 because the result replaces the call -- so we count 1 for the function itself - + 10 * length (take n_vals_wanted arg_infos) + + 10 * length (take uf_arity arg_infos) -- Discount of (un-scaled) 1 for each arg supplied, -- because the result replaces the call @@ -1214,8 +1210,9 @@ computeDiscount dflags n_vals_wanted arg_discounts res_discount arg_infos cont_i res_discount' = case cont_info of BoringCtxt -> 0 - CaseCtxt -> res_discount - _other -> 40 `min` res_discount + CaseCtxt -> res_discount -- Presumably a constructor + ValAppCtxt -> res_discount -- Presumably a function + ArgCtxt {} -> 40 `min` res_discount -- res_discount can be very large when a function returns -- constructors; but we only want to invoke that large discount -- when there's a case continuation. diff --git a/compiler/coreSyn/CoreUtils.lhs b/compiler/coreSyn/CoreUtils.lhs index 4e45da4b4bd8..00f704f7c87a 100644 --- a/compiler/coreSyn/CoreUtils.lhs +++ b/compiler/coreSyn/CoreUtils.lhs @@ -30,9 +30,6 @@ module CoreUtils ( coreBindsSize, exprSize, CoreStats(..), coreBindsStats, - -- * Hashing - hashExpr, - -- * Equality cheapEqExpr, eqExpr, eqExprX, @@ -70,8 +67,6 @@ import Maybes import Platform import Util import Pair -import Data.Word -import Data.Bits import Data.List \end{code} @@ -567,8 +562,8 @@ getIdFromTrivialExpr e = go e \end{code} exprIsBottom is a very cheap and cheerful function; it may return -False for bottoming expressions, but it never costs much to ask. 
-See also CoreArity.exprBotStrictness_maybe, but that's a bit more +False for bottoming expressions, but it never costs much to ask. See +also CoreArity.exprBotStrictness_maybe, but that's a bit more expensive. \begin{code} @@ -1517,81 +1512,6 @@ altSize (c,bs,e) = c `seq` bndrsSize bs + exprSize e \end{code} -%************************************************************************ -%* * -\subsection{Hashing} -%* * -%************************************************************************ - -\begin{code} -hashExpr :: CoreExpr -> Int --- ^ Two expressions that hash to the same @Int@ may be equal (but may not be) --- Two expressions that hash to the different Ints are definitely unequal. --- --- The emphasis is on a crude, fast hash, rather than on high precision. --- --- But unequal here means \"not identical\"; two alpha-equivalent --- expressions may hash to the different Ints. --- --- We must be careful that @\\x.x@ and @\\y.y@ map to the same hash code, --- (at least if we want the above invariant to be true). - -hashExpr e = fromIntegral (hash_expr (1,emptyVarEnv) e .&. 0x7fffffff) - -- UniqFM doesn't like negative Ints - -type HashEnv = (Int, VarEnv Int) -- Hash code for bound variables - -hash_expr :: HashEnv -> CoreExpr -> Word32 --- Word32, because we're expecting overflows here, and overflowing --- signed types just isn't cool. In C it's even undefined. -hash_expr env (Tick _ e) = hash_expr env e -hash_expr env (Cast e _) = hash_expr env e -hash_expr env (Var v) = hashVar env v -hash_expr _ (Lit lit) = fromIntegral (hashLiteral lit) -hash_expr env (App f e) = hash_expr env f * fast_hash_expr env e -hash_expr env (Let (NonRec b r) e) = hash_expr (extend_env env b) e * fast_hash_expr env r -hash_expr env (Let (Rec ((b,_):_)) e) = hash_expr (extend_env env b) e -hash_expr _ (Let (Rec []) _) = panic "hash_expr: Let (Rec []) _" -hash_expr env (Case e _ _ _) = hash_expr env e -hash_expr env (Lam b e) = hash_expr (extend_env env b) e -hash_expr env (Coercion co) = fast_hash_co env co -hash_expr _ (Type _) = WARN(True, text "hash_expr: type") 1 --- Shouldn't happen. Better to use WARN than trace, because trace --- prevents the CPR optimisation kicking in for hash_expr. - -fast_hash_expr :: HashEnv -> CoreExpr -> Word32 -fast_hash_expr env (Var v) = hashVar env v -fast_hash_expr env (Type t) = fast_hash_type env t -fast_hash_expr env (Coercion co) = fast_hash_co env co -fast_hash_expr _ (Lit lit) = fromIntegral (hashLiteral lit) -fast_hash_expr env (Cast e _) = fast_hash_expr env e -fast_hash_expr env (Tick _ e) = fast_hash_expr env e -fast_hash_expr env (App _ a) = fast_hash_expr env a -- A bit idiosyncratic ('a' not 'f')! 
-fast_hash_expr _ _ = 1 - -fast_hash_type :: HashEnv -> Type -> Word32 -fast_hash_type env ty - | Just tv <- getTyVar_maybe ty = hashVar env tv - | Just (tc,tys) <- splitTyConApp_maybe ty = let hash_tc = fromIntegral (hashName (tyConName tc)) - in foldr (\t n -> fast_hash_type env t + n) hash_tc tys - | otherwise = 1 - -fast_hash_co :: HashEnv -> Coercion -> Word32 -fast_hash_co env co - | Just cv <- getCoVar_maybe co = hashVar env cv - | Just (tc,cos) <- splitTyConAppCo_maybe co = let hash_tc = fromIntegral (hashName (tyConName tc)) - in foldr (\c n -> fast_hash_co env c + n) hash_tc cos - | otherwise = 1 - -extend_env :: HashEnv -> Var -> (Int, VarEnv Int) -extend_env (n,env) b = (n+1, extendVarEnv env b n) - -hashVar :: HashEnv -> Var -> Word32 -hashVar (_,env) v - = fromIntegral (lookupVarEnv env v `orElse` hashName (idName v)) -\end{code} - - %************************************************************************ %* * Eta reduction diff --git a/compiler/coreSyn/PprCore.lhs b/compiler/coreSyn/PprCore.lhs index 0ead297eb8be..0a6914e0b82e 100644 --- a/compiler/coreSyn/PprCore.lhs +++ b/compiler/coreSyn/PprCore.lhs @@ -429,8 +429,10 @@ instance Outputable UnfoldingSource where instance Outputable Unfolding where ppr NoUnfolding = ptext (sLit "No unfolding") ppr (OtherCon cs) = ptext (sLit "OtherCon") <+> ppr cs - ppr (DFunUnfolding ar con ops) = ptext (sLit "DFun") <> parens (ptext (sLit "arity=") <> int ar) - <+> ppr con <+> brackets (pprWithCommas ppr ops) + ppr (DFunUnfolding { df_bndrs = bndrs, df_con = con, df_args = args }) + = hang (ptext (sLit "DFun:") <+> ptext (sLit "\\") + <+> sep (map (pprBndr LambdaBind) bndrs) <+> arrow) + 2 (ppr con <+> sep (map ppr args)) ppr (CoreUnfolding { uf_src = src , uf_tmpl=rhs, uf_is_top=top, uf_is_value=hnf , uf_is_conlike=conlike, uf_is_work_free=wf @@ -451,10 +453,6 @@ instance Outputable Unfolding where | otherwise = empty -- Don't print the RHS or we get a quadratic -- blowup in the size of the printout! 
- -instance Outputable e => Outputable (DFunArg e) where - ppr (DFunPolyArg e) = braces (ppr e) - ppr (DFunLamArg i) = char '<' <> int i <> char '>' \end{code} ----------------------------------------------------- diff --git a/compiler/deSugar/DsBinds.lhs b/compiler/deSugar/DsBinds.lhs index 62793acfd366..66022f970ef6 100644 --- a/compiler/deSugar/DsBinds.lhs +++ b/compiler/deSugar/DsBinds.lhs @@ -447,24 +447,24 @@ dsSpec mb_poly_rhs (L loc (SpecPrag poly_id spec_co spec_inl)) = putSrcSpanDs loc $ do { uniq <- newUnique ; let poly_name = idName poly_id - spec_name = mkClonedInternalName uniq poly_name + spec_occ = mkSpecOcc (getOccName poly_name) + spec_name = mkInternalName uniq spec_occ (getSrcSpan poly_name) ; (bndrs, ds_lhs) <- liftM collectBinders (dsHsWrapper spec_co (Var poly_id)) ; let spec_ty = mkPiTypes bndrs (exprType ds_lhs) ; case decomposeRuleLhs bndrs ds_lhs of { Left msg -> do { warnDs msg; return Nothing } ; - Right (final_bndrs, _fn, args) -> do + Right (rule_bndrs, _fn, args) -> do - { (spec_unf, unf_pairs) <- specUnfolding spec_co spec_ty (realIdUnfolding poly_id) - - ; dflags <- getDynFlags - ; let spec_id = mkLocalId spec_name spec_ty + { dflags <- getDynFlags + ; let spec_unf = specUnfolding bndrs args (realIdUnfolding poly_id) + spec_id = mkLocalId spec_name spec_ty `setInlinePragma` inl_prag `setIdUnfolding` spec_unf rule = mkRule False {- Not auto -} is_local_id (mkFastString ("SPEC " ++ showPpr dflags poly_name)) rule_act poly_name - final_bndrs args + rule_bndrs args (mkVarApps (Var spec_id) bndrs) ; spec_rhs <- dsHsWrapper spec_co poly_rhs @@ -472,7 +472,7 @@ dsSpec mb_poly_rhs (L loc (SpecPrag poly_id spec_co spec_inl)) ; when (isInlinePragma id_inl && wopt Opt_WarnPointlessPragmas dflags) (warnDs (specOnInline poly_name)) - ; return (Just (spec_pair `consOL` unf_pairs, rule)) + ; return (Just (unitOL spec_pair, rule)) } } } where is_local_id = isJust mb_poly_rhs @@ -509,18 +509,15 @@ dsSpec mb_poly_rhs (L loc (SpecPrag poly_id spec_co spec_inl)) | otherwise = spec_prag_act -- Specified by user -specUnfolding :: HsWrapper -> Type - -> Unfolding -> DsM (Unfolding, OrdList (Id,CoreExpr)) -{- [Dec 10: TEMPORARILY commented out, until we can straighten out how to - generate unfoldings for specialised DFuns +specUnfolding :: [Var] -> [CoreExpr] -> Unfolding -> Unfolding +specUnfolding new_bndrs new_args df@(DFunUnfolding { df_bndrs = bndrs, df_args = args }) + = ASSERT2( equalLength new_args bndrs, ppr df $$ ppr new_args $$ ppr new_bndrs ) + df { df_bndrs = new_bndrs, df_args = map (substExpr (text "specUnfolding") subst) args } + where + subst = mkOpenSubst (mkInScopeSet fvs) (bndrs `zip` new_args) + fvs = (exprsFreeVars args `delVarSetList` bndrs) `extendVarSetList` new_bndrs -specUnfolding wrap_fn spec_ty (DFunUnfolding _ _ ops) - = do { let spec_rhss = map wrap_fn ops - ; spec_ids <- mapM (mkSysLocalM (fsLit "spec") . 
exprType) spec_rhss - ; return (mkDFunUnfolding spec_ty (map Var spec_ids), toOL (spec_ids `zip` spec_rhss)) } --} -specUnfolding _ _ _ - = return (noUnfolding, nilOL) +specUnfolding _ _ _ = noUnfolding specOnInline :: Name -> MsgDoc specOnInline f = ptext (sLit "SPECIALISE pragma on INLINE function probably won't fire:") @@ -598,8 +595,8 @@ decomposeRuleLhs bndrs lhs opt_lhs = simpleOptExpr lhs check_bndrs fn args - | null (dead_bndrs) = Right (extra_dict_bndrs ++ bndrs, fn, args) - | otherwise = Left (vcat (map dead_msg dead_bndrs)) + | null dead_bndrs = Right (extra_dict_bndrs ++ bndrs, fn, args) + | otherwise = Left (vcat (map dead_msg dead_bndrs)) where arg_fvs = exprsFreeVars args diff --git a/compiler/deSugar/DsMeta.hs b/compiler/deSugar/DsMeta.hs index 7717ff872c8c..57b35b3d8ae1 100644 --- a/compiler/deSugar/DsMeta.hs +++ b/compiler/deSugar/DsMeta.hs @@ -13,24 +13,17 @@ -- a Royal Pain (triggers other recompilation). ----------------------------------------------------------------------------- -{-# OPTIONS -fno-warn-tabs #-} --- The above warning supression flag is a temporary kludge. --- While working on this module you are encouraged to remove it and --- detab the module (please do the detabbing in a separate patch). See --- http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces --- for details - module DsMeta( dsBracket, - templateHaskellNames, qTyConName, nameTyConName, - liftName, liftStringName, expQTyConName, patQTyConName, + templateHaskellNames, qTyConName, nameTyConName, + liftName, liftStringName, expQTyConName, patQTyConName, decQTyConName, decsQTyConName, typeQTyConName, - decTyConName, typeTyConName, mkNameG_dName, mkNameG_vName, mkNameG_tcName, - quoteExpName, quotePatName, quoteDecName, quoteTypeName - ) where + decTyConName, typeTyConName, mkNameG_dName, mkNameG_vName, mkNameG_tcName, + quoteExpName, quotePatName, quoteDecName, quoteTypeName + ) where #include "HsVersions.h" -import {-# SOURCE #-} DsExpr ( dsExpr ) +import {-# SOURCE #-} DsExpr ( dsExpr ) import MatchLit import DsMonad @@ -105,7 +98,7 @@ dsBracket brack splices ------------------------------------------------------- --- Declarations +-- Declarations ------------------------------------------------------- repTopP :: LPat Name -> DsM (Core TH.PatQ) @@ -117,34 +110,34 @@ repTopDs :: HsGroup Name -> DsM (Core (TH.Q [TH.Dec])) repTopDs group = do { let { tv_bndrs = hsSigTvBinders (hs_valds group) ; bndrs = tv_bndrs ++ hsGroupBinders group } ; - ss <- mkGenSyms bndrs ; + ss <- mkGenSyms bndrs ; - -- Bind all the names mainly to avoid repeated use of explicit strings. - -- Thus we get - -- do { t :: String <- genSym "T" ; - -- return (Data t [] ...more t's... } - -- The other important reason is that the output must mention - -- only "T", not "Foo:T" where Foo is the current module + -- Bind all the names mainly to avoid repeated use of explicit strings. + -- Thus we get + -- do { t :: String <- genSym "T" ; + -- return (Data t [] ...more t's... 
} + -- The other important reason is that the output must mention + -- only "T", not "Foo:T" where Foo is the current module - decls <- addBinds ss (do { + decls <- addBinds ss (do { fix_ds <- mapM repFixD (hs_fixds group) ; - val_ds <- rep_val_binds (hs_valds group) ; - tycl_ds <- mapM repTyClD (concat (hs_tyclds group)) ; - inst_ds <- mapM repInstD (hs_instds group) ; - rule_ds <- mapM repRuleD (hs_ruleds group) ; - for_ds <- mapM repForD (hs_fords group) ; - -- more needed - return (de_loc $ sort_by_loc $ + val_ds <- rep_val_binds (hs_valds group) ; + tycl_ds <- mapM repTyClD (concat (hs_tyclds group)) ; + inst_ds <- mapM repInstD (hs_instds group) ; + rule_ds <- mapM repRuleD (hs_ruleds group) ; + for_ds <- mapM repForD (hs_fords group) ; + -- more needed + return (de_loc $ sort_by_loc $ val_ds ++ catMaybes tycl_ds ++ fix_ds ++ inst_ds ++ rule_ds ++ for_ds) }) ; - decl_ty <- lookupType decQTyConName ; - let { core_list = coreList' decl_ty decls } ; + decl_ty <- lookupType decQTyConName ; + let { core_list = coreList' decl_ty decls } ; - dec_ty <- lookupType decTyConName ; - q_decs <- repSequenceQ dec_ty core_list ; + dec_ty <- lookupType decTyConName ; + q_decs <- repSequenceQ dec_ty core_list ; - wrapGenSyms ss q_decs + wrapGenSyms ss q_decs } @@ -155,8 +148,8 @@ hsSigTvBinders binds , tv <- hsQTvBndrs qtvs] where sigs = case binds of - ValBindsIn _ sigs -> sigs - ValBindsOut _ sigs -> sigs + ValBindsIn _ sigs -> sigs + ValBindsOut _ sigs -> sigs {- Notes @@ -180,19 +173,19 @@ Note [Binders and occurrences] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When we desugar [d| data T = MkT |] we want to get - Data "T" [] [Con "MkT" []] [] + Data "T" [] [Con "MkT" []] [] and *not* - Data "Foo:T" [] [Con "Foo:MkT" []] [] + Data "Foo:T" [] [Con "Foo:MkT" []] [] That is, the new data decl should fit into whatever new module it is asked to fit in. We do *not* clone, though; no need for this: - Data "T79" .... + Data "T79" .... But if we see this: - data T = MkT - foo = reifyDecl T + data T = MkT + foo = reifyDecl T then we must desugar to - foo = Data "Foo:T" [] [Con "Foo:MkT" []] [] + foo = Data "Foo:T" [] [Con "Foo:MkT" []] [] So in repTopDs we bring the binders into scope with mkGenSyms and addBinds. 
And we use lookupOcc, rather than lookupBinder @@ -207,39 +200,39 @@ repTyClD :: LTyClDecl Name -> DsM (Maybe (SrcSpan, Core TH.DecQ)) repTyClD (L loc (FamDecl { tcdFam = fam })) = liftM Just $ repFamilyDecl (L loc fam) repTyClD (L loc (SynDecl { tcdLName = tc, tcdTyVars = tvs, tcdRhs = rhs })) - = do { tc1 <- lookupLOcc tc -- See note [Binders and occurrences] - ; dec <- addTyClTyVarBinds tvs $ \bndrs -> - repSynDecl tc1 bndrs rhs + = do { tc1 <- lookupLOcc tc -- See note [Binders and occurrences] + ; dec <- addTyClTyVarBinds tvs $ \bndrs -> + repSynDecl tc1 bndrs rhs ; return (Just (loc, dec)) } repTyClD (L loc (DataDecl { tcdLName = tc, tcdTyVars = tvs, tcdDataDefn = defn })) - = do { tc1 <- lookupLOcc tc -- See note [Binders and occurrences] + = do { tc1 <- lookupLOcc tc -- See note [Binders and occurrences] ; tc_tvs <- mk_extra_tvs tc tvs defn - ; dec <- addTyClTyVarBinds tc_tvs $ \bndrs -> - repDataDefn tc1 bndrs Nothing (hsLTyVarNames tc_tvs) defn + ; dec <- addTyClTyVarBinds tc_tvs $ \bndrs -> + repDataDefn tc1 bndrs Nothing (hsLTyVarNames tc_tvs) defn ; return (Just (loc, dec)) } -repTyClD (L loc (ClassDecl { tcdCtxt = cxt, tcdLName = cls, - tcdTyVars = tvs, tcdFDs = fds, - tcdSigs = sigs, tcdMeths = meth_binds, +repTyClD (L loc (ClassDecl { tcdCtxt = cxt, tcdLName = cls, + tcdTyVars = tvs, tcdFDs = fds, + tcdSigs = sigs, tcdMeths = meth_binds, tcdATs = ats, tcdATDefs = [] })) - = do { cls1 <- lookupLOcc cls -- See note [Binders and occurrences] - ; dec <- addTyVarBinds tvs $ \bndrs -> + = do { cls1 <- lookupLOcc cls -- See note [Binders and occurrences] + ; dec <- addTyVarBinds tvs $ \bndrs -> do { cxt1 <- repLContext cxt - ; sigs1 <- rep_sigs sigs - ; binds1 <- rep_binds meth_binds - ; fds1 <- repLFunDeps fds + ; sigs1 <- rep_sigs sigs + ; binds1 <- rep_binds meth_binds + ; fds1 <- repLFunDeps fds ; ats1 <- repFamilyDecls ats - ; decls1 <- coreList decQTyConName (ats1 ++ sigs1 ++ binds1) - ; repClass cxt1 cls1 bndrs fds1 decls1 + ; decls1 <- coreList decQTyConName (ats1 ++ sigs1 ++ binds1) + ; repClass cxt1 cls1 bndrs fds1 decls1 } - ; return $ Just (loc, dec) + ; return $ Just (loc, dec) } -- Un-handled cases repTyClD (L loc d) = putSrcSpanDs loc $ - do { warnDs (hang ds_msg 4 (ppr d)) - ; return Nothing } + do { warnDs (hang ds_msg 4 (ppr d)) + ; return Nothing } ------------------------- repDataDefn :: Core TH.Name -> Core [TH.TyVarBndr] @@ -248,7 +241,7 @@ repDataDefn :: Core TH.Name -> Core [TH.TyVarBndr] -> DsM (Core TH.DecQ) repDataDefn tc bndrs opt_tys tv_names (HsDataDefn { dd_ND = new_or_data, dd_ctxt = cxt - , dd_cons = cons, dd_derivs = mb_derivs }) + , dd_cons = cons, dd_derivs = mb_derivs }) = do { cxt1 <- repLContext cxt ; derivs1 <- repDerivs mb_derivs ; case new_or_data of @@ -267,12 +260,11 @@ repSynDecl tc bndrs ty repFamilyDecl :: LFamilyDecl Name -> DsM (SrcSpan, Core TH.DecQ) repFamilyDecl (L loc (FamilyDecl { fdInfo = info, fdLName = tc, - fdTyVars = tvs, - fdKindSig = opt_kind })) - = do { tc1 <- lookupLOcc tc -- See note [Binders and occurrences] + fdTyVars = tvs, + fdKindSig = opt_kind })) + = do { tc1 <- lookupLOcc tc -- See note [Binders and occurrences] ; dec <- addTyClTyVarBinds tvs $ \bndrs -> - do { - ; case (opt_kind, info) of + case (opt_kind, info) of (Nothing, ClosedTypeFamily eqns) -> do { eqns1 <- mapM repTyFamEqn eqns ; eqns2 <- coreList tySynEqnQTyConName eqns1 @@ -289,7 +281,6 @@ repFamilyDecl (L loc (FamilyDecl { fdInfo = info, do { info' <- repFamilyInfo info ; ki1 <- repLKind ki ; repFamilyKind info' tc1 bndrs ki1 } - } ; return (loc, 
dec) } @@ -297,7 +288,7 @@ repFamilyDecls :: [LFamilyDecl Name] -> DsM [Core TH.DecQ] repFamilyDecls fds = liftM de_loc (mapM repFamilyDecl fds) ------------------------- -mk_extra_tvs :: Located Name -> LHsTyVarBndrs Name +mk_extra_tvs :: Located Name -> LHsTyVarBndrs Name -> HsDataDefn Name -> DsM (LHsTyVarBndrs Name) -- If there is a kind signature it must be of form -- k1 -> .. -> kn -> * @@ -360,15 +351,15 @@ repClsInstD (ClsInstDecl { cid_poly_ty = ty, cid_binds = binds , cid_sigs = prags, cid_tyfam_insts = ats , cid_datafam_insts = adts }) = addTyVarBinds tvs $ \_ -> - -- We must bring the type variables into scope, so their - -- occurrences don't fail, even though the binders don't + -- We must bring the type variables into scope, so their + -- occurrences don't fail, even though the binders don't -- appear in the resulting data structure - -- - -- But we do NOT bring the binders of 'binds' into scope - -- because they are properly regarded as occurrences - -- For example, the method names should be bound to - -- the selector Ids, not to fresh names (Trac #5410) - -- + -- + -- But we do NOT bring the binders of 'binds' into scope + -- because they are properly regarded as occurrences + -- For example, the method names should be bound to + -- the selector Ids, not to fresh names (Trac #5410) + -- do { cxt1 <- repContext cxt ; cls_tcon <- repTy (HsTyVar (unLoc cls)) ; cls_tys <- repLTys tys @@ -385,9 +376,21 @@ repClsInstD (ClsInstDecl { cid_poly_ty = ty, cid_binds = binds repTyFamInstD :: TyFamInstDecl Name -> DsM (Core TH.DecQ) repTyFamInstD decl@(TyFamInstDecl { tfid_eqn = eqn }) = do { let tc_name = tyFamInstDeclLName decl +<<<<<<< HEAD ; tc <- lookupLOcc tc_name -- See note [Binders and occurrences] ; eqn1 <- repTyFamEqn eqn ; repTySynInst tc eqn1 } +||||||| merged common ancestors + ; tc <- lookupLOcc tc_name -- See note [Binders and occurrences] + ; eqns1 <- mapM repTyFamEqn eqns + ; eqns2 <- coreList tySynEqnQTyConName eqns1 + ; repTySynInst tc eqns2 } +======= + ; tc <- lookupLOcc tc_name -- See note [Binders and occurrences] + ; eqns1 <- mapM repTyFamEqn eqns + ; eqns2 <- coreList tySynEqnQTyConName eqns1 + ; repTySynInst tc eqns2 } +>>>>>>> master repTyFamEqn :: LTyFamInstEqn Name -> DsM (Core TH.TySynEqnQ) repTyFamEqn (L loc (TyFamInstEqn { tfie_pats = HsWB { hswb_cts = tys @@ -406,7 +409,7 @@ repDataFamInstD :: DataFamInstDecl Name -> DsM (Core TH.DecQ) repDataFamInstD (DataFamInstDecl { dfid_tycon = tc_name , dfid_pats = HsWB { hswb_cts = tys, hswb_kvs = kv_names, hswb_tvs = tv_names } , dfid_defn = defn }) - = do { tc <- lookupLOcc tc_name -- See note [Binders and occurrences] + = do { tc <- lookupLOcc tc_name -- See note [Binders and occurrences] ; let loc = getLoc tc_name hs_tvs = HsQTvs { hsq_kvs = kv_names, hsq_tvs = userHsTyVarBndrs loc tv_names } -- Yuk ; addTyClTyVarBinds hs_tvs $ \ bndrs -> @@ -474,7 +477,7 @@ repRuleD (L loc (HsRule n act bndrs lhs _ rhs _)) ruleBndrNames :: RuleBndr Name -> [Name] ruleBndrNames (RuleBndr n) = [unLoc n] -ruleBndrNames (RuleBndrSig n (HsWB { hswb_kvs = kvs, hswb_tvs = tvs })) +ruleBndrNames (RuleBndrSig n (HsWB { hswb_kvs = kvs, hswb_tvs = tvs })) = unLoc n : kvs ++ tvs repRuleBndr :: RuleBndr Name -> DsM (Core TH.RuleBndrQ) @@ -490,14 +493,14 @@ ds_msg :: SDoc ds_msg = ptext (sLit "Cannot desugar this Template Haskell declaration:") ------------------------------------------------------- --- Constructors +-- Constructors ------------------------------------------------------- repC :: [Name] -> LConDecl Name -> DsM (Core 
TH.ConQ) repC _ (L _ (ConDecl { con_name = con, con_qvars = con_tvs, con_cxt = L _ [] , con_details = details, con_res = ResTyH98 })) | null (hsQTvBndrs con_tvs) - = do { con1 <- lookupLOcc con -- See Note [Binders and occurrences] + = do { con1 <- lookupLOcc con -- See Note [Binders and occurrences] ; repConstr con1 details } repC tvs (L _ (ConDecl { con_name = con @@ -508,10 +511,10 @@ repC tvs (L _ (ConDecl { con_name = con ; let ex_tvs = HsQTvs { hsq_kvs = filterOut (in_subst con_tv_subst) (hsq_kvs con_tvs) , hsq_tvs = filterOut (in_subst con_tv_subst . hsLTyVarName) (hsq_tvs con_tvs) } - ; binds <- mapM dupBinder con_tv_subst + ; binds <- mapM dupBinder con_tv_subst ; dsExtendMetaEnv (mkNameEnv binds) $ -- Binds some of the con_tvs addTyVarBinds ex_tvs $ \ ex_bndrs -> -- Binds the remaining con_tvs - do { con1 <- lookupLOcc con -- See Note [Binders and occurrences] + do { con1 <- lookupLOcc con -- See Note [Binders and occurrences] ; c' <- repConstr con1 details ; ctxt' <- repContext (eq_ctxt ++ ctxt) ; rep2 forallCName [unC ex_bndrs, unC ctxt', unC c'] } } @@ -520,9 +523,9 @@ in_subst :: [(Name,Name)] -> Name -> Bool in_subst [] _ = False in_subst ((n',_):ns) n = n==n' || in_subst ns n -mkGadtCtxt :: [Name] -- Tyvars of the data type +mkGadtCtxt :: [Name] -- Tyvars of the data type -> ResType (LHsType Name) - -> DsM (HsContext Name, [(Name,Name)]) + -> DsM (HsContext Name, [(Name,Name)]) -- Given a data type in GADT syntax, figure out the equality -- context, so that we can represent it with an explicit -- equality context, because that is the only way to express @@ -570,12 +573,12 @@ repBangTy ty= do rep2 strictTypeName [s, t] where (str, ty') = case ty of - L _ (HsBangTy (HsUserBang (Just True) True) ty) -> (unpackedName, ty) - L _ (HsBangTy (HsUserBang _ True) ty) -> (isStrictName, ty) - _ -> (notStrictName, ty) + L _ (HsBangTy (HsUserBang (Just True) True) ty) -> (unpackedName, ty) + L _ (HsBangTy (HsUserBang _ True) ty) -> (isStrictName, ty) + _ -> (notStrictName, ty) ------------------------------------------------------- --- Deriving clause +-- Deriving clause ------------------------------------------------------- repDerivs :: Maybe [LHsType Name] -> DsM (Core [TH.Name]) @@ -584,7 +587,7 @@ repDerivs (Just ctxt) = repList nameTyConName rep_deriv ctxt where rep_deriv :: LHsType Name -> DsM (Core TH.Name) - -- Deriving clauses must have the simple H98 form + -- Deriving clauses must have the simple H98 form rep_deriv ty | Just (cls, []) <- splitHsClassTy_maybe (unLoc ty) = lookupOcc cls @@ -601,13 +604,13 @@ rep_sigs sigs = do locs_cores <- rep_sigs' sigs return $ de_loc $ sort_by_loc locs_cores rep_sigs' :: [LSig Name] -> DsM [(SrcSpan, Core TH.DecQ)] - -- We silently ignore ones we don't recognise + -- We silently ignore ones we don't recognise rep_sigs' sigs = do { sigs1 <- mapM rep_sig sigs ; - return (concat sigs1) } + return (concat sigs1) } rep_sig :: LSig Name -> DsM [(SrcSpan, Core TH.DecQ)] - -- Singleton => Ok - -- Empty => Too hard, signature ignored + -- Singleton => Ok + -- Empty => Too hard, signature ignored rep_sig (L loc (TypeSig nms ty)) = mapM (rep_ty_sig loc ty) nms rep_sig (L _ (GenericSig nm _)) = failWithDs msg where msg = vcat [ ptext (sLit "Illegal default signature for") <+> quotes (ppr nm) @@ -640,7 +643,7 @@ rep_ty_sig loc (L _ ty) nm rep_inline :: Located Name - -> InlinePragma -- Never defaultInlinePragma + -> InlinePragma -- Never defaultInlinePragma -> SrcSpan -> DsM [(SrcSpan, Core TH.DecQ)] rep_inline nm ispec loc @@ -692,10 +695,10 @@ 
repPhases (ActiveAfter i) = do { MkC arg <- coreIntLit i repPhases _ = dataCon allPhasesDataConName ------------------------------------------------------- --- Types +-- Types ------------------------------------------------------- -addTyVarBinds :: LHsTyVarBndrs Name -- the binders to be added +addTyVarBinds :: LHsTyVarBndrs Name -- the binders to be added -> (Core [TH.TyVarBndr] -> DsM (Core (TH.Q a))) -- action in the ext env -> DsM (Core (TH.Q a)) -- gensym a list of type variables and enter them into the meta environment; @@ -704,9 +707,9 @@ addTyVarBinds :: LHsTyVarBndrs Name -- the binders to be addTyVarBinds tvs m = do { freshNames <- mkGenSyms (hsLKiTyVarNames tvs) - ; term <- addBinds freshNames $ - do { kbs <- repList tyVarBndrTyConName mk_tv_bndr (hsQTvBndrs tvs `zip` freshNames) - ; m kbs } + ; term <- addBinds freshNames $ + do { kbs <- repList tyVarBndrTyConName mk_tv_bndr (hsQTvBndrs tvs `zip` freshNames) + ; m kbs } ; wrapGenSyms freshNames term } where mk_tv_bndr (tv, (_,v)) = repTyVarBndrWithKind tv (coreVar v) @@ -724,12 +727,12 @@ addTyClTyVarBinds tvs m = do { let tv_names = hsLKiTyVarNames tvs ; env <- dsGetMetaEnv ; freshNames <- mkGenSyms (filterOut (`elemNameEnv` env) tv_names) - -- Make fresh names for the ones that are not already in scope + -- Make fresh names for the ones that are not already in scope -- This makes things work for family declarations - ; term <- addBinds freshNames $ - do { kbs <- repList tyVarBndrTyConName mk_tv_bndr (hsQTvBndrs tvs) - ; m kbs } + ; term <- addBinds freshNames $ + do { kbs <- repList tyVarBndrTyConName mk_tv_bndr (hsQTvBndrs tvs) + ; m kbs } ; wrapGenSyms freshNames term } where @@ -738,7 +741,7 @@ addTyClTyVarBinds tvs m -- Produce kinded binder constructors from the Haskell tyvar binders -- -repTyVarBndrWithKind :: LHsTyVarBndr Name +repTyVarBndrWithKind :: LHsTyVarBndr Name -> Core TH.Name -> DsM (Core TH.TyVarBndr) repTyVarBndrWithKind (L _ (UserTyVar {})) nm = repPlainTV nm @@ -752,7 +755,7 @@ repLContext (L _ ctxt) = repContext ctxt repContext :: HsContext Name -> DsM (Core TH.CxtQ) repContext ctxt = do preds <- repList predQTyConName repLPred ctxt - repCtxt preds + repCtxt preds -- represent a type predicate -- @@ -793,41 +796,41 @@ repTy (HsForAllTy _ tvs ctxt ty) = repTy (HsTyVar n) | isTvOcc occ = do tv1 <- lookupOcc n - repTvar tv1 + repTvar tv1 | isDataOcc occ = do tc1 <- lookupOcc n repPromotedTyCon tc1 - | otherwise = do tc1 <- lookupOcc n - repNamedTyCon tc1 + | otherwise = do tc1 <- lookupOcc n + repNamedTyCon tc1 where occ = nameOccName n repTy (HsAppTy f a) = do - f1 <- repLTy f - a1 <- repLTy a - repTapp f1 a1 + f1 <- repLTy f + a1 <- repLTy a + repTapp f1 a1 repTy (HsFunTy f a) = do - f1 <- repLTy f - a1 <- repLTy a - tcon <- repArrowTyCon - repTapps tcon [f1, a1] -repTy (HsListTy t) = do - t1 <- repLTy t - tcon <- repListTyCon - repTapp tcon t1 + f1 <- repLTy f + a1 <- repLTy a + tcon <- repArrowTyCon + repTapps tcon [f1, a1] +repTy (HsListTy t) = do + t1 <- repLTy t + tcon <- repListTyCon + repTapp tcon t1 repTy (HsPArrTy t) = do - t1 <- repLTy t - tcon <- repTy (HsTyVar (tyConName parrTyCon)) - repTapp tcon t1 + t1 <- repLTy t + tcon <- repTy (HsTyVar (tyConName parrTyCon)) + repTapp tcon t1 repTy (HsTupleTy HsUnboxedTuple tys) = do - tys1 <- repLTys tys - tcon <- repUnboxedTupleTyCon (length tys) - repTapps tcon tys1 + tys1 <- repLTys tys + tcon <- repUnboxedTupleTyCon (length tys) + repTapps tcon tys1 repTy (HsTupleTy _ tys) = do tys1 <- repLTys tys tcon <- repTupleTyCon (length tys) repTapps tcon 
tys1 repTy (HsOpTy ty1 (_, n) ty2) = repLTy ((nlHsTyVar (unLoc n) `nlHsAppTy` ty1) - `nlHsAppTy` ty2) -repTy (HsParTy t) = repLTy t + `nlHsAppTy` ty2) +repTy (HsParTy t) = repLTy t repTy (HsKindSig t k) = do t1 <- repLTy t k1 <- repLKind k @@ -843,7 +846,7 @@ repTy (HsExplicitTupleTy _ tys) = do repTy (HsTyLit lit) = do lit' <- repTyLit lit repTLit lit' -repTy ty = notHandled "Exotic form of type" (ppr ty) +repTy ty = notHandled "Exotic form of type" (ppr ty) repTyLit :: HsTyLit -> DsM (Core TH.TyLitQ) repTyLit (HsNumTy i) = do dflags <- getDynFlags @@ -888,7 +891,7 @@ repNonArrowKind (HsTupleTy _ ks) = do { ks' <- mapM repLKind ks repNonArrowKind k = notHandled "Exotic form of kind" (ppr k) ----------------------------------------------------------------------------- --- Splices +-- Splices ----------------------------------------------------------------------------- repSplice :: HsSplice Name -> DsM (Core a) @@ -897,21 +900,21 @@ repSplice :: HsSplice Name -> DsM (Core a) repSplice (HsSplice n _) = do { mb_val <- dsLookupMetaEnv n ; case mb_val of - Just (Splice e) -> do { e' <- dsExpr e - ; return (MkC e') } - _ -> pprPanic "HsSplice" (ppr n) } - -- Should not happen; statically checked + Just (Splice e) -> do { e' <- dsExpr e + ; return (MkC e') } + _ -> pprPanic "HsSplice" (ppr n) } + -- Should not happen; statically checked ----------------------------------------------------------------------------- --- Expressions +-- Expressions ----------------------------------------------------------------------------- repLEs :: [LHsExpr Name] -> DsM (Core [TH.ExpQ]) repLEs es = repList expQTyConName repLE es -- FIXME: some of these panics should be converted into proper error messages --- unless we can make sure that constructs, which are plainly not --- supported in TH already lead to error messages at an earlier stage +-- unless we can make sure that constructs, which are plainly not +-- supported in TH already lead to error messages at an earlier stage repLE :: LHsExpr Name -> DsM (Core TH.ExpQ) repLE (L loc e) = putSrcSpanDs loc (repE e) @@ -919,15 +922,15 @@ repE :: HsExpr Name -> DsM (Core TH.ExpQ) repE (HsVar x) = do { mb_val <- dsLookupMetaEnv x ; case mb_val of - Nothing -> do { str <- globalVar x - ; repVarOrCon x str } - Just (Bound y) -> repVarOrCon x (coreVar y) - Just (Splice e) -> do { e' <- dsExpr e - ; return (MkC e') } } + Nothing -> do { str <- globalVar x + ; repVarOrCon x str } + Just (Bound y) -> repVarOrCon x (coreVar y) + Just (Splice e) -> do { e' <- dsExpr e + ; return (MkC e') } } repE e@(HsIPVar _) = notHandled "Implicit parameters" (ppr e) - -- Remember, we're desugaring renamer output here, so - -- HsOverlit can definitely occur + -- Remember, we're desugaring renamer output here, so + -- HsOverlit can definitely occur repE (HsOverLit l) = do { a <- repOverloadedLiteral l; repLit a } repE (HsLit l) = do { a <- repLiteral l; repLit a } repE (HsLam (MG { mg_alts = [m] })) = repLambda m @@ -943,9 +946,9 @@ repE (OpApp e1 op _ e2) = the_op <- repLE op ; repInfixApp arg1 the_op arg2 } repE (NegApp x _) = do - a <- repLE x - negateVar <- lookupOcc negateName >>= repVar - negateVar `repApp` a + a <- repLE x + negateVar <- lookupOcc negateName >>= repVar + negateVar `repApp` a repE (HsPar x) = repLE x repE (SectionL x y) = do { a <- repLE x; b <- repLE y; repSectionL a b } repE (SectionR x y) = do { a <- repLE x; b <- repLE y; repSectionR a b } @@ -955,18 +958,18 @@ repE (HsCase e (MG { mg_alts = ms })) ; core_ms2 <- coreList matchQTyConName ms2 ; repCaseE arg core_ms2 } 
repE (HsIf _ x y z) = do - a <- repLE x - b <- repLE y - c <- repLE z - repCond a b c + a <- repLE x + b <- repLE y + c <- repLE z + repCond a b c repE (HsMultiIf _ alts) = do { (binds, alts') <- liftM unzip $ mapM repLGRHS alts ; expr' <- repMultiIf (nonEmptyCoreList alts') ; wrapGenSyms (concat binds) expr' } repE (HsLet bs e) = do { (ss,ds) <- repBinds bs - ; e2 <- addBinds ss (repLE e) - ; z <- repLetE ds e2 - ; wrapGenSyms ss z } + ; e2 <- addBinds ss (repLE e) + ; z <- repLetE ds e2 + ; wrapGenSyms ss z } -- FIXME: I haven't got the types here right yet repE e@(HsDo ctxt sts _) @@ -1004,18 +1007,18 @@ repE (ArithSeq _ _ aseq) = case aseq of From e -> do { ds1 <- repLE e; repFrom ds1 } FromThen e1 e2 -> do - ds1 <- repLE e1 - ds2 <- repLE e2 - repFromThen ds1 ds2 + ds1 <- repLE e1 + ds2 <- repLE e2 + repFromThen ds1 ds2 FromTo e1 e2 -> do - ds1 <- repLE e1 - ds2 <- repLE e2 - repFromTo ds1 ds2 + ds1 <- repLE e1 + ds2 <- repLE e2 + repFromTo ds1 ds2 FromThenTo e1 e2 e3 -> do - ds1 <- repLE e1 - ds2 <- repLE e2 - ds3 <- repLE e3 - repFromThenTo ds1 ds2 ds3 + ds1 <- repLE e1 + ds2 <- repLE e2 + ds3 <- repLE e3 + repFromThenTo ds1 ds2 ds3 repE (HsSpliceE splice) = repSplice splice repE e@(PArrSeq {}) = notHandled "Parallel arrays" (ppr e) @@ -1023,7 +1026,7 @@ repE e@(HsCoreAnn {}) = notHandled "Core annotations" (ppr e) repE e@(HsSCC {}) = notHandled "Cost centres" (ppr e) repE e@(HsTickPragma {}) = notHandled "Tick Pragma" (ppr e) repE e@(HsBracketOut {}) = notHandled "TH brackets" (ppr e) -repE e = notHandled "Expression form" (ppr e) +repE e = notHandled "Expression form" (ppr e) ----------------------------------------------------------------------------- -- Building representations of auxillary structures like Match, Clause, Stmt, @@ -1135,41 +1138,41 @@ repSts other = notHandled "Exotic statement" (ppr other) ----------------------------------------------------------- --- Bindings +-- Bindings ----------------------------------------------------------- repBinds :: HsLocalBinds Name -> DsM ([GenSymBind], Core [TH.DecQ]) repBinds EmptyLocalBinds - = do { core_list <- coreList decQTyConName [] - ; return ([], core_list) } + = do { core_list <- coreList decQTyConName [] + ; return ([], core_list) } repBinds b@(HsIPBinds _) = notHandled "Implicit parameters" (ppr b) repBinds (HsValBinds decs) - = do { let { bndrs = hsSigTvBinders decs ++ collectHsValBinders decs } - -- No need to worrry about detailed scopes within - -- the binding group, because we are talking Names - -- here, so we can safely treat it as a mutually - -- recursive group + = do { let { bndrs = hsSigTvBinders decs ++ collectHsValBinders decs } + -- No need to worrry about detailed scopes within + -- the binding group, because we are talking Names + -- here, so we can safely treat it as a mutually + -- recursive group -- For hsSigTvBinders see Note [Scoped type variables in bindings] - ; ss <- mkGenSyms bndrs - ; prs <- addBinds ss (rep_val_binds decs) - ; core_list <- coreList decQTyConName - (de_loc (sort_by_loc prs)) - ; return (ss, core_list) } + ; ss <- mkGenSyms bndrs + ; prs <- addBinds ss (rep_val_binds decs) + ; core_list <- coreList decQTyConName + (de_loc (sort_by_loc prs)) + ; return (ss, core_list) } rep_val_binds :: HsValBinds Name -> DsM [(SrcSpan, Core TH.DecQ)] -- Assumes: all the binders of the binding are alrady in the meta-env rep_val_binds (ValBindsOut binds sigs) = do { core1 <- rep_binds' (unionManyBags (map snd binds)) - ; core2 <- rep_sigs' sigs - ; return (core1 ++ core2) } + ; core2 <- rep_sigs' 
sigs + ; return (core1 ++ core2) } rep_val_binds (ValBindsIn _ _) = panic "rep_val_binds: ValBindsIn" rep_binds :: LHsBinds Name -> DsM [Core TH.DecQ] rep_binds binds = do { binds_w_locs <- rep_binds' binds - ; return (de_loc (sort_by_loc binds_w_locs)) } + ; return (de_loc (sort_by_loc binds_w_locs)) } rep_binds' :: LHsBinds Name -> DsM [(SrcSpan, Core TH.DecQ)] rep_binds' binds = mapM rep_bind (bagToList binds) @@ -1181,35 +1184,35 @@ rep_bind :: LHsBind Name -> DsM (SrcSpan, Core TH.DecQ) -- e.g. x = g 5 as a Fun MonoBinds. This is indicated by a single match -- with an empty list of patterns rep_bind (L loc (FunBind { fun_id = fn, - fun_matches = MG { mg_alts = [L _ (Match [] _ (GRHSs guards wheres))] } })) + fun_matches = MG { mg_alts = [L _ (Match [] _ (GRHSs guards wheres))] } })) = do { (ss,wherecore) <- repBinds wheres - ; guardcore <- addBinds ss (repGuards guards) - ; fn' <- lookupLBinder fn - ; p <- repPvar fn' - ; ans <- repVal p guardcore wherecore - ; ans' <- wrapGenSyms ss ans - ; return (loc, ans') } + ; guardcore <- addBinds ss (repGuards guards) + ; fn' <- lookupLBinder fn + ; p <- repPvar fn' + ; ans <- repVal p guardcore wherecore + ; ans' <- wrapGenSyms ss ans + ; return (loc, ans') } rep_bind (L loc (FunBind { fun_id = fn, fun_matches = MG { mg_alts = ms } })) = do { ms1 <- mapM repClauseTup ms - ; fn' <- lookupLBinder fn + ; fn' <- lookupLBinder fn ; ans <- repFun fn' (nonEmptyCoreList ms1) ; return (loc, ans) } rep_bind (L loc (PatBind { pat_lhs = pat, pat_rhs = GRHSs guards wheres })) = do { patcore <- repLP pat ; (ss,wherecore) <- repBinds wheres - ; guardcore <- addBinds ss (repGuards guards) + ; guardcore <- addBinds ss (repGuards guards) ; ans <- repVal patcore guardcore wherecore - ; ans' <- wrapGenSyms ss ans + ; ans' <- wrapGenSyms ss ans ; return (loc, ans') } rep_bind (L _ (VarBind { var_id = v, var_rhs = e})) = do { v' <- lookupBinder v - ; e2 <- repLE e + ; e2 <- repLE e ; x <- repNormal e2 ; patcore <- repPvar v' - ; empty_decls <- coreList decQTyConName [] + ; empty_decls <- coreList decQTyConName [] ; ans <- repVal patcore x empty_decls ; return (srcLocSpan (getSrcLoc v), ans) } @@ -1244,14 +1247,14 @@ repLambda (L _ (Match ps _ (GRHSs [L _ (GRHS [] e)] EmptyLocalBinds))) = do { let bndrs = collectPatsBinders ps ; ; ss <- mkGenSyms bndrs ; lam <- addBinds ss ( - do { xs <- repLPs ps; body <- repLE e; repLam xs body }) + do { xs <- repLPs ps; body <- repLE e; repLam xs body }) ; wrapGenSyms ss lam } repLambda (L _ m) = notHandled "Guarded labmdas" (pprMatch (LambdaExpr :: HsMatchContext Name) m) ----------------------------------------------------------------------------- --- Patterns +-- Patterns -- repP deals with patterns. It assumes that we have already -- walked over the pattern(s) once to collect the binders, and -- have extended the environment. So every pattern-bound @@ -1291,17 +1294,17 @@ repP (ConPatIn dc details) rep_fld fld = do { MkC v <- lookupLOcc (hsRecFieldId fld) ; MkC p <- repLP (hsRecFieldArg fld) ; rep2 fieldPatName [v,p] } - + repP (NPat l Nothing _) = do { a <- repOverloadedLiteral l; repPlit a } repP (ViewPat e p _) = do { e' <- repLE e; p' <- repLP p; repPview e' p' } repP p@(NPat _ (Just _) _) = notHandled "Negative overloaded patterns" (ppr p) repP p@(SigPatIn {}) = notHandled "Type signatures in patterns" (ppr p) - -- The problem is to do with scoped type variables. - -- To implement them, we have to implement the scoping rules - -- here in DsMeta, and I don't want to do that today! 
- -- do { p' <- repLP p; t' <- repLTy t; repPsig p' t' } - -- repPsig :: Core TH.PatQ -> Core TH.TypeQ -> DsM (Core TH.PatQ) - -- repPsig (MkC p) (MkC t) = rep2 sigPName [p, t] + -- The problem is to do with scoped type variables. + -- To implement them, we have to implement the scoping rules + -- here in DsMeta, and I don't want to do that today! + -- do { p' <- repLP p; t' <- repLTy t; repPsig p' t' } + -- repPsig :: Core TH.PatQ -> Core TH.TypeQ -> DsM (Core TH.PatQ) + -- repPsig (MkC p) (MkC t) = rep2 sigPName [p, t] repP other = notHandled "Exotic pattern" (ppr other) @@ -1316,20 +1319,20 @@ de_loc :: [(a, b)] -> [b] de_loc = map snd ---------------------------------------------------------- --- The meta-environment +-- The meta-environment -- A name/identifier association for fresh names of locally bound entities -type GenSymBind = (Name, Id) -- Gensym the string and bind it to the Id - -- I.e. (x, x_id) means - -- let x_id = gensym "x" in ... +type GenSymBind = (Name, Id) -- Gensym the string and bind it to the Id + -- I.e. (x, x_id) means + -- let x_id = gensym "x" in ... -- Generate a fresh name for a locally bound entity mkGenSyms :: [Name] -> DsM [GenSymBind] -- We can use the existing name. For example: --- [| \x_77 -> x_77 + x_77 |] +-- [| \x_77 -> x_77 + x_77 |] -- desugars to --- do { x_77 <- genSym "x"; .... } +-- do { x_77 <- genSym "x"; .... } -- We use the same x_77 in the desugared program, but with the type Bndr -- instead of Int -- @@ -1337,7 +1340,7 @@ mkGenSyms :: [Name] -> DsM [GenSymBind] -- -- Nevertheless, it's monadic because we have to generate nameTy mkGenSyms ns = do { var_ty <- lookupType nameTyConName - ; return [(nm, mkLocalId (localiseName nm) var_ty) | nm <- ns] } + ; return [(nm, mkLocalId (localiseName nm) var_ty) | nm <- ns] } addBinds :: [GenSymBind] -> DsM a -> DsM a @@ -1379,73 +1382,73 @@ lookupOcc :: Name -> DsM (Core TH.Name) lookupOcc n = do { mb_val <- dsLookupMetaEnv n ; case mb_val of - Nothing -> globalVar n - Just (Bound x) -> return (coreVar x) - Just (Splice _) -> pprPanic "repE:lookupOcc" (ppr n) + Nothing -> globalVar n + Just (Bound x) -> return (coreVar x) + Just (Splice _) -> pprPanic "repE:lookupOcc" (ppr n) } globalVar :: Name -> DsM (Core TH.Name) -- Not bound by the meta-env -- Could be top-level; or could be local --- f x = $(g [| x |]) +-- f x = $(g [| x |]) -- Here the x will be local globalVar name | isExternalName name - = do { MkC mod <- coreStringLit name_mod + = do { MkC mod <- coreStringLit name_mod ; MkC pkg <- coreStringLit name_pkg - ; MkC occ <- occNameLit name - ; rep2 mk_varg [pkg,mod,occ] } + ; MkC occ <- occNameLit name + ; rep2 mk_varg [pkg,mod,occ] } | otherwise - = do { MkC occ <- occNameLit name - ; MkC uni <- coreIntLit (getKey (getUnique name)) - ; rep2 mkNameLName [occ,uni] } + = do { MkC occ <- occNameLit name + ; MkC uni <- coreIntLit (getKey (getUnique name)) + ; rep2 mkNameLName [occ,uni] } where mod = ASSERT( isExternalName name) nameModule name name_mod = moduleNameString (moduleName mod) name_pkg = packageIdString (modulePackageId mod) name_occ = nameOccName name mk_varg | OccName.isDataOcc name_occ = mkNameG_dName - | OccName.isVarOcc name_occ = mkNameG_vName - | OccName.isTcOcc name_occ = mkNameG_tcName - | otherwise = pprPanic "DsMeta.globalVar" (ppr name) + | OccName.isVarOcc name_occ = mkNameG_vName + | OccName.isTcOcc name_occ = mkNameG_tcName + | otherwise = pprPanic "DsMeta.globalVar" (ppr name) -lookupType :: Name -- Name of type constructor (e.g. 
TH.ExpQ) - -> DsM Type -- The type +lookupType :: Name -- Name of type constructor (e.g. TH.ExpQ) + -> DsM Type -- The type lookupType tc_name = do { tc <- dsLookupTyCon tc_name ; - return (mkTyConApp tc []) } + return (mkTyConApp tc []) } wrapGenSyms :: [GenSymBind] - -> Core (TH.Q a) -> DsM (Core (TH.Q a)) + -> Core (TH.Q a) -> DsM (Core (TH.Q a)) -- wrapGenSyms [(nm1,id1), (nm2,id2)] y --- --> bindQ (gensym nm1) (\ id1 -> --- bindQ (gensym nm2 (\ id2 -> --- y)) +-- --> bindQ (gensym nm1) (\ id1 -> +-- bindQ (gensym nm2 (\ id2 -> +-- y)) wrapGenSyms binds body@(MkC b) = do { var_ty <- lookupType nameTyConName - ; go var_ty binds } + ; go var_ty binds } where [elt_ty] = tcTyConAppArgs (exprType b) - -- b :: Q a, so we can get the type 'a' by looking at the - -- argument type. NB: this relies on Q being a data/newtype, - -- not a type synonym + -- b :: Q a, so we can get the type 'a' by looking at the + -- argument type. NB: this relies on Q being a data/newtype, + -- not a type synonym go _ [] = return body go var_ty ((name,id) : binds) = do { MkC body' <- go var_ty binds - ; lit_str <- occNameLit name - ; gensym_app <- repGensym lit_str - ; repBindQ var_ty elt_ty - gensym_app (MkC (Lam id body')) } + ; lit_str <- occNameLit name + ; gensym_app <- repGensym lit_str + ; repBindQ var_ty elt_ty + gensym_app (MkC (Lam id body')) } occNameLit :: Name -> DsM (Core String) occNameLit n = coreStringLit (occNameString (nameOccName n)) -- %********************************************************************* --- %* * --- Constructing code --- %* * +-- %* * +-- Constructing code +-- %* * -- %********************************************************************* ----------------------------------------------------------------------------- @@ -1472,9 +1475,9 @@ dataCon n = dataCon' n [] -- %********************************************************************* --- %* * --- The 'smart constructors' --- %* * +-- %* * +-- The 'smart constructors' +-- %* * -- %********************************************************************* --------------- Patterns ----------------- @@ -1520,7 +1523,7 @@ repPview (MkC e) (MkC p) = rep2 viewPName [e,p] --------------- Expressions ----------------- repVarOrCon :: Name -> Core TH.Name -> DsM (Core TH.ExpQ) repVarOrCon vc str | isDataOcc (nameOccName vc) = repCon str - | otherwise = repVar str + | otherwise = repVar str repVar :: Core TH.Name -> DsM (Core TH.ExpQ) repVar (MkC s) = rep2 varEName [s] @@ -1867,7 +1870,7 @@ repKConstraint :: DsM (Core TH.Kind) repKConstraint = rep2 constraintKName [] ---------------------------------------------------------- --- Literals +-- Literals repLiteral :: HsLit -> DsM (Core TH.Lit) repLiteral lit @@ -1880,20 +1883,20 @@ repLiteral lit _ -> return lit lit_expr <- dsLit lit' case mb_lit_name of - Just lit_name -> rep2 lit_name [lit_expr] - Nothing -> notHandled "Exotic literal" (ppr lit) + Just lit_name -> rep2 lit_name [lit_expr] + Nothing -> notHandled "Exotic literal" (ppr lit) where mb_lit_name = case lit of - HsInteger _ _ -> Just integerLName - HsInt _ -> Just integerLName - HsIntPrim _ -> Just intPrimLName - HsWordPrim _ -> Just wordPrimLName - HsFloatPrim _ -> Just floatPrimLName - HsDoublePrim _ -> Just doublePrimLName - HsChar _ -> Just charLName - HsString _ -> Just stringLName - HsRat _ _ -> Just rationalLName - _ -> Nothing + HsInteger _ _ -> Just integerLName + HsInt _ -> Just integerLName + HsIntPrim _ -> Just intPrimLName + HsWordPrim _ -> Just wordPrimLName + HsFloatPrim _ -> Just floatPrimLName + HsDoublePrim _ -> Just 
doublePrimLName + HsChar _ -> Just charLName + HsString _ -> Just stringLName + HsRat _ _ -> Just rationalLName + _ -> Nothing mk_integer :: Integer -> DsM HsLit mk_integer i = do integer_ty <- lookupType integerTyConName @@ -1907,9 +1910,9 @@ mk_string s = return $ HsString s repOverloadedLiteral :: HsOverLit Name -> DsM (Core TH.Lit) repOverloadedLiteral (OverLit { ol_val = val}) = do { lit <- mk_lit val; repLiteral lit } - -- The type Rational will be in the environment, because - -- the smart constructor 'TH.Syntax.rationalL' uses it in its type, - -- and rationalL is sucked in when any TH stuff is used + -- The type Rational will be in the environment, because + -- the smart constructor 'TH.Syntax.rationalL' uses it in its type, + -- and rationalL is sucked in when any TH stuff is used mk_lit :: OverLitVal -> DsM HsLit mk_lit (HsIntegral i) = mk_integer i @@ -1921,8 +1924,8 @@ mk_lit (HsIsString s) = mk_string s repGensym :: Core String -> DsM (Core (TH.Q TH.Name)) repGensym (MkC lit_str) = rep2 newNameName [lit_str] -repBindQ :: Type -> Type -- a and b - -> Core (TH.Q a) -> Core (a -> TH.Q b) -> DsM (Core (TH.Q b)) +repBindQ :: Type -> Type -- a and b + -> Core (TH.Q a) -> Core (a -> TH.Q b) -> DsM (Core (TH.Q b)) repBindQ ty_a ty_b (MkC x) (MkC y) = rep2 bindQName [Type ty_a, Type ty_b, x, y] @@ -1933,25 +1936,25 @@ repSequenceQ ty_a (MkC list) ------------ Lists and Tuples ------------------- -- turn a list of patterns into a single pattern matching a list -repList :: Name -> (a -> DsM (Core b)) +repList :: Name -> (a -> DsM (Core b)) -> [a] -> DsM (Core [b]) repList tc_name f args = do { args1 <- mapM f args ; coreList tc_name args1 } -coreList :: Name -- Of the TyCon of the element type - -> [Core a] -> DsM (Core [a]) +coreList :: Name -- Of the TyCon of the element type + -> [Core a] -> DsM (Core [a]) coreList tc_name es = do { elt_ty <- lookupType tc_name; return (coreList' elt_ty es) } -coreList' :: Type -- The element type - -> [Core a] -> Core [a] +coreList' :: Type -- The element type + -> [Core a] -> Core [a] coreList' elt_ty es = MkC (mkListExpr elt_ty (map unC es )) nonEmptyCoreList :: [Core a] -> Core [a] -- The list must be non-empty so we can get the element type -- Otherwise use coreList -nonEmptyCoreList [] = panic "coreList: empty argument" +nonEmptyCoreList [] = panic "coreList: empty argument" nonEmptyCoreList xs@(MkC x:_) = MkC (mkListExpr (exprType x) (map unC xs)) coreStringLit :: String -> DsM (Core String) @@ -1963,7 +1966,7 @@ coreIntLit :: Int -> DsM (Core Int) coreIntLit i = do dflags <- getDynFlags return (MkC (mkIntExprInt dflags i)) -coreVar :: Id -> Core TH.Name -- The Id has type Name +coreVar :: Id -> Core TH.Name -- The Id has type Name coreVar id = MkC (Var id) ----------------- Failure ----------------------- @@ -1971,13 +1974,13 @@ notHandled :: String -> SDoc -> DsM a notHandled what doc = failWithDs msg where msg = hang (text what <+> ptext (sLit "not (yet) handled by Template Haskell")) - 2 doc + 2 doc -- %************************************************************************ --- %* * --- The known-key names for Template Haskell --- %* * +-- %* * +-- The known-key names for Template Haskell +-- %* * -- %************************************************************************ -- To add a name, do three things @@ -2414,10 +2417,10 @@ tySynEqnQTyConName = libTc (fsLit "TySynEqnQ") tySynEqnQTyConKey -- quasiquoting quoteExpName, quotePatName, quoteDecName, quoteTypeName :: Name -quoteExpName = qqFun (fsLit "quoteExp") quoteExpKey -quotePatName 
= qqFun (fsLit "quotePat") quotePatKey -quoteDecName = qqFun (fsLit "quoteDec") quoteDecKey -quoteTypeName = qqFun (fsLit "quoteType") quoteTypeKey +quoteExpName = qqFun (fsLit "quoteExp") quoteExpKey +quotePatName = qqFun (fsLit "quotePat") quotePatKey +quoteDecName = qqFun (fsLit "quoteDec") quoteDecKey +quoteTypeName = qqFun (fsLit "quoteType") quoteTypeKey -- TyConUniques available: 200-299 -- Check in PrelNames if you want to change this diff --git a/compiler/ghci/ByteCodeAsm.lhs b/compiler/ghci/ByteCodeAsm.lhs index 955119768df3..9906467186e7 100644 --- a/compiler/ghci/ByteCodeAsm.lhs +++ b/compiler/ghci/ByteCodeAsm.lhs @@ -156,7 +156,7 @@ assembleBCO dflags (ProtoBCO nm instrs bitmap bsize arity _origin _malloced) = d (final_insns, final_lits, final_ptrs) <- flip execStateT initial_state $ runAsm dflags long_jumps env asm -- precomputed size should be equal to final size - ASSERT (n_insns == sizeSS final_insns) return () + ASSERT(n_insns == sizeSS final_insns) return () let asm_insns = ssElts final_insns barr a = case a of UArray _lo _hi _n b -> b diff --git a/compiler/ghci/DebuggerUtils.hs b/compiler/ghci/DebuggerUtils.hs index 7a03bbcdc23b..3d73e69e2bfe 100644 --- a/compiler/ghci/DebuggerUtils.hs +++ b/compiler/ghci/DebuggerUtils.hs @@ -115,14 +115,14 @@ dataConInfoPtrToName x = do -- Warning: this code assumes that the string is well formed. parse :: [Word8] -> ([Word8], [Word8], [Word8]) parse input - = ASSERT (all (>0) (map length [pkg, mod, occ])) (pkg, mod, occ) + = ASSERT(all (>0) (map length [pkg, mod, occ])) (pkg, mod, occ) where dot = fromIntegral (ord '.') (pkg, rest1) = break (== fromIntegral (ord ':')) input (mod, occ) = (concat $ intersperse [dot] $ reverse modWords, occWord) where - (modWords, occWord) = ASSERT (length rest1 > 0) (parseModOcc [] (tail rest1)) + (modWords, occWord) = ASSERT(length rest1 > 0) (parseModOcc [] (tail rest1)) parseModOcc :: [[Word8]] -> [Word8] -> ([[Word8]], [Word8]) -- We only look for dots if str could start with a module name, -- i.e. if it starts with an upper case character. diff --git a/compiler/ghci/Linker.lhs b/compiler/ghci/Linker.lhs index 7bfee0f48b19..df82510b621f 100644 --- a/compiler/ghci/Linker.lhs +++ b/compiler/ghci/Linker.lhs @@ -636,7 +636,7 @@ getLinkDeps hsc_env hpt pls replace_osuf span mods return lnk adjust_ul new_osuf (DotO file) = do - MASSERT (osuf `isSuffixOf` file) + MASSERT(osuf `isSuffixOf` file) let file_base = reverse (drop (length osuf + 1) (reverse file)) new_file = file_base <.> new_osuf ok <- doesFileExist new_file @@ -895,7 +895,7 @@ linkSomeBCOs dflags toplevs_only ie ce_in ul_bcos else ce_all_additions ce_out = -- make sure we're not inserting duplicate names into the -- closure environment, which leads to trouble. - ASSERT (all (not . (`elemNameEnv` ce_in)) (map fst ce_additions)) + ASSERT(all (not . 
(`elemNameEnv` ce_in)) (map fst ce_additions)) extendClosureEnv ce_in ce_additions return (ce_out, hvals) diff --git a/compiler/ghci/RtClosureInspect.hs b/compiler/ghci/RtClosureInspect.hs index d6cbf87fccf2..746a547a5bca 100644 --- a/compiler/ghci/RtClosureInspect.hs +++ b/compiler/ghci/RtClosureInspect.hs @@ -1264,7 +1264,7 @@ unlessM condM acc = condM >>= \c -> unless c acc -- Strict application of f at index i appArr :: Ix i => (e -> a) -> Array i e -> Int -> a appArr f a@(Array _ _ _ ptrs#) i@(I# i#) - = ASSERT2 (i < length(elems a), ppr(length$ elems a, i)) + = ASSERT2(i < length(elems a), ppr(length$ elems a, i)) case indexArray# ptrs# i# of (# e #) -> f e diff --git a/compiler/iface/BinIface.hs b/compiler/iface/BinIface.hs index 52650e7e99e5..ba1a7e28e2b4 100644 --- a/compiler/iface/BinIface.hs +++ b/compiler/iface/BinIface.hs @@ -24,7 +24,6 @@ import TyCon import DataCon (dataConName, dataConWorkId, dataConTyCon) import PrelInfo (wiredInThings, basicKnownKeyNames) import Id (idName, isDataConWorkId_maybe) -import CoreSyn (DFunArg(..)) import Coercion (LeftOrRight(..)) import TysWiredIn import IfaceEnv @@ -1110,14 +1109,6 @@ instance Binary IfaceIdDetails where 1 -> do { a <- get bh; b <- get bh; return (IfRecSelId a b) } _ -> do { n <- get bh; return (IfDFunId n) } -instance Binary (DFunArg IfaceExpr) where - put_ bh (DFunPolyArg e) = putByte bh 0 >> put_ bh e - put_ bh (DFunLamArg i) = putByte bh 1 >> put_ bh i - get bh = do { h <- getByte bh - ; case h of - 0 -> do { a <- get bh; return (DFunPolyArg a) } - _ -> do { a <- get bh; return (DFunLamArg a) } } - instance Binary IfaceIdInfo where put_ bh NoInfo = putByte bh 0 put_ bh (HasInfo i) = putByte bh 1 >> lazyPut bh i -- NB lazyPut @@ -1164,9 +1155,10 @@ instance Binary IfaceUnfolding where putByte bh 3 put_ bh a put_ bh n - put_ bh (IfDFunUnfold as) = do + put_ bh (IfDFunUnfold as bs) = do putByte bh 4 put_ bh as + put_ bh bs put_ bh (IfCompulsory e) = do putByte bh 5 put_ bh e @@ -1188,7 +1180,8 @@ instance Binary IfaceUnfolding where n <- get bh return (IfExtWrapper a n) 4 -> do as <- get bh - return (IfDFunUnfold as) + bs <- get bh + return (IfDFunUnfold as bs) _ -> do e <- get bh return (IfCompulsory e) diff --git a/compiler/iface/IfaceEnv.lhs b/compiler/iface/IfaceEnv.lhs index 20a21c373335..0441fdbf4188 100644 --- a/compiler/iface/IfaceEnv.lhs +++ b/compiler/iface/IfaceEnv.lhs @@ -55,10 +55,27 @@ import Data.IORef ( atomicModifyIORef, readIORef ) %* * %********************************************************* +Note [The Name Cache] +~~~~~~~~~~~~~~~~~~~~~ +The Name Cache makes sure that, during any invovcation of GHC, each +External Name "M.x" has one, and only one globally-agreed Unique. + +* The first time we come across M.x we make up a Unique and record that + association in the Name Cache. + +* When we come across "M.x" again, we look it up in the Name Cache, + and get a hit. + +The functions newGlobalBinder, allocateGlobalBinder do the main work. +When you make an External name, you should probably be calling one +of them. 
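
To make the lookup-or-allocate behaviour described above concrete, here is a minimal, self-contained sketch of the pattern. It is illustrative only: the types below (a cache keyed by plain module/occurrence strings, an Int counter standing in for the Unique supply) are simplified stand-ins and do not match the real NameCache, OrigNameCache or allocateGlobalBinder in IfaceEnv, which additionally refresh the recorded Name's location when the true binding site is reached.

import qualified Data.Map.Strict as M

-- Simplified stand-ins for Module, OccName, Unique and the cache itself.
type ModuleS  = String
type OccNameS = String
type UniqueS  = Int

data NameCacheS = NameCacheS
  { ncNextUnique :: UniqueS                            -- stand-in for the Unique supply
  , ncNames      :: M.Map (ModuleS, OccNameS) UniqueS  -- stand-in for the OrigNameCache
  }

-- Lookup-or-allocate: the first time we see M.x we invent a Unique and
-- record it; every later lookup returns the same Unique.
allocateGlobalBinderS :: NameCacheS -> ModuleS -> OccNameS -> (NameCacheS, UniqueS)
allocateGlobalBinderS nc mod_ occ =
  case M.lookup (mod_, occ) (ncNames nc) of
    Just uniq -> (nc, uniq)            -- a hit: reuse the globally-agreed Unique
    Nothing   ->                       -- a miss: allocate a fresh one and record it
      let uniq = ncNextUnique nc
          nc'  = nc { ncNextUnique = uniq + 1
                    , ncNames      = M.insert (mod_, occ) uniq (ncNames nc) }
      in (nc', uniq)

main :: IO ()
main = do
  let nc0        = NameCacheS 0 M.empty
      (nc1, u1)  = allocateGlobalBinderS nc0 "M" "x"
      (nc2, u2)  = allocateGlobalBinderS nc1 "M" "y"
      (_,   u1') = allocateGlobalBinderS nc2 "M" "x"
  print (u1, u2, u1' == u1)   -- (0,1,True): "M.x" keeps its first Unique

Running main prints (0,1,True): the second encounter of "M.x" gets back the Unique allocated the first time, which is the one-Unique-per-External-Name guarantee the note describes.
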
+ + \begin{code} newGlobalBinder :: Module -> OccName -> SrcSpan -> TcRnIf a b Name -- Used for source code and interface files, to make the -- Name for a thing, given its Module and OccName +-- See Note [The Name Cache] -- -- The cache may already already have a binding for this thing, -- because we may have seen an occurrence before, but now is the @@ -74,6 +91,7 @@ allocateGlobalBinder :: NameCache -> Module -> OccName -> SrcSpan -> (NameCache, Name) +-- See Note [The Name Cache] allocateGlobalBinder name_supply mod occ loc = case lookupOrigNameCache (nsNames name_supply) mod occ of -- A hit in the cache! We are at the binding site of the name. @@ -171,6 +189,8 @@ lookupOrig mod occ %* * %************************************************************************ +See Note [The Name Cache] above. + \begin{code} lookupOrigNameCache :: OrigNameCache -> Module -> OccName -> Maybe Name lookupOrigNameCache _ mod occ diff --git a/compiler/iface/IfaceSyn.lhs b/compiler/iface/IfaceSyn.lhs index b38e43b6576a..ad327d642826 100644 --- a/compiler/iface/IfaceSyn.lhs +++ b/compiler/iface/IfaceSyn.lhs @@ -37,7 +37,6 @@ module IfaceSyn ( #include "HsVersions.h" import IfaceType -import CoreSyn( DFunArg, dfunArgExprs ) import PprCore() -- Printing DFunArgs import Demand import Annotations @@ -280,7 +279,7 @@ data IfaceUnfolding | IfLclWrapper Arity IfLclName -- because the worker can simplify to a function in -- another module. - | IfDFunUnfold [DFunArg IfaceExpr] + | IfDFunUnfold [IfaceBndr] [IfaceExpr] -------------------------------- data IfaceExpr @@ -796,8 +795,8 @@ instance Outputable IfaceUnfolding where <+> parens (ptext (sLit "arity") <+> int a) ppr (IfExtWrapper a wkr) = ptext (sLit "Worker(ext):") <+> ppr wkr <+> parens (ptext (sLit "arity") <+> int a) - ppr (IfDFunUnfold ns) = ptext (sLit "DFun:") - <+> brackets (pprWithCommas ppr ns) + ppr (IfDFunUnfold bs es) = hang (ptext (sLit "DFun:") <+> sep (map ppr bs) <> dot) + 2 (sep (map pprParendIfaceExpr es)) -- ----------------------------------------------------------------------------- -- | Finding the Names in IfaceSyn @@ -927,7 +926,7 @@ freeNamesIfUnfold (IfCompulsory e) = freeNamesIfExpr e freeNamesIfUnfold (IfInlineRule _ _ _ e) = freeNamesIfExpr e freeNamesIfUnfold (IfExtWrapper _ v) = unitNameSet v freeNamesIfUnfold (IfLclWrapper {}) = emptyNameSet -freeNamesIfUnfold (IfDFunUnfold vs) = fnList freeNamesIfExpr (dfunArgExprs vs) +freeNamesIfUnfold (IfDFunUnfold bs es) = fnList freeNamesIfBndr bs &&& fnList freeNamesIfExpr es freeNamesIfExpr :: IfaceExpr -> NameSet freeNamesIfExpr (IfaceExt v) = unitNameSet v diff --git a/compiler/iface/MkIface.lhs b/compiler/iface/MkIface.lhs index 065d68c2d546..d9bd6fc94189 100644 --- a/compiler/iface/MkIface.lhs +++ b/compiler/iface/MkIface.lhs @@ -1759,8 +1759,8 @@ toIfUnfolding lb (CoreUnfolding { uf_tmpl = rhs, uf_arity = arity where if_rhs = toIfaceExpr rhs -toIfUnfolding lb (DFunUnfolding _ar _con ops) - = Just (HsUnfold lb (IfDFunUnfold (map (fmap toIfaceExpr) ops))) +toIfUnfolding lb (DFunUnfolding { df_bndrs = bndrs, df_args = args }) + = Just (HsUnfold lb (IfDFunUnfold (map toIfaceBndr bndrs) (map toIfaceExpr args))) -- No need to serialise the data constructor; -- we can recover it from the type of the dfun diff --git a/compiler/iface/TcIface.lhs b/compiler/iface/TcIface.lhs index 5422e516a2b7..af9d8f609c82 100644 --- a/compiler/iface/TcIface.lhs +++ b/compiler/iface/TcIface.lhs @@ -461,15 +461,22 @@ tc_iface_decl parent _ (IfaceData {ifName = occ_name, ; let fam_tc = coAxiomTyCon ax ax_unbr = 
toUnbranchedAxiom ax -- data families don't have branches: - branch = coAxiomSingleBranch ax_unbr - ax_tvs = coAxBranchTyVars branch - ax_lhs = coAxBranchLHS branch - subst = zipTopTvSubst ax_tvs (mkTyVarTys tyvars) + branch = coAxiomSingleBranch ax_unbr + ax_tvs = coAxBranchTyVars branch + ax_lhs = coAxBranchLHS branch + tycon_tys = mkTyVarTys tyvars + subst = mkTopTvSubst (ax_tvs `zip` tycon_tys) -- The subst matches the tyvar of the TyCon -- with those from the CoAxiom. They aren't -- necessarily the same, since the two may be -- gotten from separate interface-file declarations - ; return (FamInstTyCon ax_unbr fam_tc (substTys subst ax_lhs)) } + -- NB: ax_tvs may be shorter because of eta-reduction + -- See Note [Eta reduction for data family axioms] in TcInstDcls + lhs_tys = substTys subst ax_lhs `chkAppend` + dropList ax_tvs tycon_tys + -- The 'lhs_tys' should be 1-1 with the 'tyvars' + -- but ax_tvs maybe shorter because of eta-reduction + ; return (FamInstTyCon ax_unbr fam_tc lhs_tys) } tc_iface_decl parent _ (IfaceSyn {ifName = occ_name, ifTyVars = tv_bndrs, ifSynRhs = mb_rhs_ty, @@ -1254,15 +1261,15 @@ tcUnfolding name _ _ (IfInlineRule arity unsat_ok boring_ok if_expr) (UnfWhen unsat_ok boring_ok)) } -tcUnfolding name dfun_ty _ (IfDFunUnfold ops) - = do { mb_ops1 <- forkM_maybe doc $ mapM tc_arg ops +tcUnfolding name dfun_ty _ (IfDFunUnfold bs ops) + = bindIfaceBndrs bs $ \ bs' -> + do { mb_ops1 <- forkM_maybe doc $ mapM tcIfaceExpr ops ; return (case mb_ops1 of Nothing -> noUnfolding - Just ops1 -> mkDFunUnfolding dfun_ty ops1) } + Just ops1 -> mkDFunUnfolding bs' (classDataCon cls) ops1) } where doc = text "Class ops for dfun" <+> ppr name - tc_arg (DFunPolyArg e) = do { e' <- tcIfaceExpr e; return (DFunPolyArg e') } - tc_arg (DFunLamArg i) = return (DFunLamArg i) + (_, _, cls, _) = tcSplitDFunTy dfun_ty tcUnfolding name ty info (IfExtWrapper arity wkr) = tcIfaceWrapper name ty info arity (tcIfaceExtId wkr) diff --git a/compiler/llvmGen/LlvmCodeGen.hs b/compiler/llvmGen/LlvmCodeGen.hs index a157a258fea3..4f2bded6bbbc 100644 --- a/compiler/llvmGen/LlvmCodeGen.hs +++ b/compiler/llvmGen/LlvmCodeGen.hs @@ -117,19 +117,19 @@ cmmProcLlvmGens :: DynFlags -> BufHandle -> UniqSupply -> LlvmEnv -> [RawCmmDecl -> [[LlvmVar]] -- ^ info tables that need to be marked as 'used' -> IO () -cmmProcLlvmGens _ _ _ _ [] _ [] - = return () - cmmProcLlvmGens dflags h _ _ [] _ ivars - = let ivars' = concat ivars - cast x = LMBitc (LMStaticPointer (pVarLift x)) i8Ptr - ty = (LMArray (length ivars') i8Ptr) - usedArray = LMStaticArray (map cast ivars') ty - lmUsed = (LMGlobalVar (fsLit "llvm.used") ty Appending - (Just $ fsLit "llvm.metadata") Nothing False, Just usedArray) - in Prt.bufLeftRender h $ {-# SCC "llvm_used_ppr" #-} - withPprStyleDoc dflags (mkCodeStyle CStyle) $ - pprLlvmData ([lmUsed], []) + | null ivars' = return () + | otherwise = Prt.bufLeftRender h $ + {-# SCC "llvm_used_ppr" #-} + withPprStyleDoc dflags (mkCodeStyle CStyle) $ + pprLlvmData ([lmUsed], []) + where + ivars' = concat ivars + cast x = LMBitc (LMStaticPointer (pVarLift x)) i8Ptr + ty = (LMArray (length ivars') i8Ptr) + usedArray = LMStaticArray (map cast ivars') ty + lmUsed = (LMGlobalVar (fsLit "llvm.used") ty Appending + (Just $ fsLit "llvm.metadata") Nothing False, Just usedArray) cmmProcLlvmGens dflags h us env ((CmmData _ _) : cmms) count ivars = cmmProcLlvmGens dflags h us env cmms count ivars diff --git a/compiler/llvmGen/LlvmCodeGen/CodeGen.hs b/compiler/llvmGen/LlvmCodeGen/CodeGen.hs index 915981752e53..d7ddbdd02732 
100644 --- a/compiler/llvmGen/LlvmCodeGen/CodeGen.hs +++ b/compiler/llvmGen/LlvmCodeGen/CodeGen.hs @@ -387,6 +387,9 @@ genCall env target res args = do `appOL` retStmt, top1 ++ top2 ++ top3) +-- genCallSimpleCast _ _ _ dsts _ = +-- panic ("genCallSimpleCast: " ++ show (length dsts) ++ " dsts") + -- | Create a function pointer from a target. getFunPtr :: LlvmEnv -> (LMString -> LlvmType) -> ForeignTarget -> UniqSM ExprData diff --git a/compiler/llvmGen/LlvmCodeGen/Ppr.hs b/compiler/llvmGen/LlvmCodeGen/Ppr.hs index c699631e9c8b..3afa9100e4ad 100644 --- a/compiler/llvmGen/LlvmCodeGen/Ppr.hs +++ b/compiler/llvmGen/LlvmCodeGen/Ppr.hs @@ -61,6 +61,9 @@ moduleLayout = sdocWithPlatform $ \platform -> Platform { platformArch = ArchARM {}, platformOS = OSAndroid } -> text "target datalayout = \"e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32\"" $+$ text "target triple = \"arm-unknown-linux-androideabi\"" + Platform { platformArch = ArchARM {}, platformOS = OSQNXNTO } -> + text "target datalayout = \"e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32\"" + $+$ text "target triple = \"arm-unknown-nto-qnx8.0.0eabi\"" Platform { platformArch = ArchARM {}, platformOS = OSiOS } -> text "target datalayout = \"e-p:32:32:32-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:64:128-a0:0:64-n32\"" $+$ text "target triple = \"arm-apple-darwin10\"" diff --git a/compiler/main/DriverPipeline.hs b/compiler/main/DriverPipeline.hs index 7df823c27db3..7ceccef0a316 100644 --- a/compiler/main/DriverPipeline.hs +++ b/compiler/main/DriverPipeline.hs @@ -169,7 +169,7 @@ compileOne' m_tc_result mHscMessage case e of Left iface -> do details <- genModDetails hsc_env iface - MASSERT (isJust maybe_old_linkable) + MASSERT(isJust maybe_old_linkable) return (HomeModInfo{ hm_details = details, hm_iface = iface, hm_linkable = maybe_old_linkable }) @@ -1140,10 +1140,10 @@ runPhase (RealPhase cc_phase) input_fn dflags -- very weakly typed, being derived from C--. ["-fno-strict-aliasing"] - let gcc_lang_opt | cc_phase `eqPhase` Ccpp = "c++" - | cc_phase `eqPhase` Cobjc = "objective-c" + let gcc_lang_opt | cc_phase `eqPhase` Ccpp = "c++" + | cc_phase `eqPhase` Cobjc = "objective-c" | cc_phase `eqPhase` Cobjcpp = "objective-c++" - | otherwise = "c" + | otherwise = "c" liftIO $ SysTools.runCc dflags ( -- force the C compiler to interpret this file as C when -- compiling .hc files, by adding the -x c option. @@ -2042,8 +2042,12 @@ doCpp dflags raw input_fn output_fn = do ++ map SysTools.Option backend_defs ++ map SysTools.Option hscpp_opts ++ map SysTools.Option sse_defs + -- Set the language mode to assembler-with-cpp when preprocessing. This + -- alleviates some of the C99 macro rules relating to whitespace and the hash + -- operator, which we tend to abuse. Clang in particular is not very happy + -- about this. ++ [ SysTools.Option "-x" - , SysTools.Option "c" + , SysTools.Option "assembler-with-cpp" , SysTools.Option input_fn -- We hackily use Option instead of FileOption here, so that the file -- name is not back-slashed on Windows. 
cpp is capable of diff --git a/compiler/main/DynFlags.hs b/compiler/main/DynFlags.hs index 2319b92e3de3..64ae9b5699a7 100644 --- a/compiler/main/DynFlags.hs +++ b/compiler/main/DynFlags.hs @@ -129,6 +129,9 @@ module DynFlags ( -- * SSE isSse2Enabled, isSse4_2Enabled, + + -- * Linker information + LinkerInfo(..), ) where #include "HsVersions.h" @@ -273,6 +276,8 @@ data GeneralFlag -- optimisation opts | Opt_Strictness + | Opt_KillAbsence + | Opt_KillOneShot | Opt_FullLaziness | Opt_FloatIn | Opt_Specialise @@ -740,7 +745,10 @@ data DynFlags = DynFlags { nextWrapperNum :: IORef Int, -- | Machine dependant flags (-m stuff) - sseVersion :: Maybe (Int, Int) -- (major, minor) + sseVersion :: Maybe (Int, Int), -- (major, minor) + + -- | Run-time linker information (what options we need, etc.) + rtldFlags :: IORef (Maybe LinkerInfo) } class HasDynFlags m where @@ -1199,6 +1207,7 @@ initDynFlags dflags = do refFilesToNotIntermediateClean <- newIORef [] refGeneratedDumps <- newIORef Set.empty refLlvmVersion <- newIORef 28 + refRtldFlags <- newIORef Nothing wrapperNum <- newIORef 0 canUseUnicodeQuotes <- do let enc = localeEncoding str = "‛’" @@ -1214,7 +1223,8 @@ initDynFlags dflags = do generatedDumps = refGeneratedDumps, llvmVersion = refLlvmVersion, nextWrapperNum = wrapperNum, - useUnicodeQuotes = canUseUnicodeQuotes + useUnicodeQuotes = canUseUnicodeQuotes, + rtldFlags = refRtldFlags } -- | The normal 'DynFlags'. Note that they is not suitable for use in this form @@ -1347,7 +1357,8 @@ defaultDynFlags mySettings = llvmVersion = panic "defaultDynFlags: No llvmVersion", interactivePrint = Nothing, nextWrapperNum = panic "defaultDynFlags: No nextWrapperNum", - sseVersion = Nothing + sseVersion = Nothing, + rtldFlags = panic "defaultDynFlags: no rtldFlags" } defaultWays :: Settings -> [Way] @@ -2534,7 +2545,9 @@ fFlags = [ ( "hpc", Opt_Hpc, nop ), ( "pre-inlining", Opt_SimplPreInlining, nop ), ( "flat-cache", Opt_FlatCache, nop ), - ( "use-rpaths", Opt_RPath, nop ) + ( "use-rpaths", Opt_RPath, nop ), + ( "kill-absence", Opt_KillAbsence, nop), + ( "kill-one-shot", Opt_KillOneShot, nop) ] -- | These @-f\@ flags can all be reversed with @-fno-\@ @@ -3515,3 +3528,14 @@ isSse2Enabled dflags = case platformArch (targetPlatform dflags) of isSse4_2Enabled :: DynFlags -> Bool isSse4_2Enabled dflags = sseVersion dflags >= Just (4,2) + +-- ----------------------------------------------------------------------------- +-- Linker information + +-- LinkerInfo contains any extra options needed by the system linker. 
+data LinkerInfo + = GnuLD [Option] + | GnuGold [Option] + | DarwinLD [Option] + | UnknownLD + deriving Eq diff --git a/compiler/main/GhcMake.hs b/compiler/main/GhcMake.hs index 4970b6725e2e..c43b18a62ad5 100644 --- a/compiler/main/GhcMake.hs +++ b/compiler/main/GhcMake.hs @@ -332,8 +332,7 @@ load how_much = do liftIO $ intermediateCleanTempFiles dflags mods_to_keep hsc_env1 -- there should be no Nothings where linkables should be, now - ASSERT(all (isJust.hm_linkable) - (eltsUFM (hsc_HPT hsc_env))) do + ASSERT(all (isJust.hm_linkable) (eltsUFM (hsc_HPT hsc_env))) do -- Link everything together linkresult <- liftIO $ link (ghcLink dflags) dflags False hpt4 diff --git a/compiler/main/StaticFlags.hs b/compiler/main/StaticFlags.hs index a1104de5f6f5..09d577263782 100644 --- a/compiler/main/StaticFlags.hs +++ b/compiler/main/StaticFlags.hs @@ -35,7 +35,10 @@ module StaticFlags ( addOpt, removeOpt, v_opt_C_ready, -- Saving/restoring globals - saveStaticFlagGlobals, restoreStaticFlagGlobals + saveStaticFlagGlobals, restoreStaticFlagGlobals, + + -- For options autocompletion + flagsStatic, flagsStaticNames ) where #include "HsVersions.h" @@ -139,9 +142,13 @@ flagsStatic = [ ] + isStaticFlag :: String -> Bool -isStaticFlag f = - f `elem` [ +isStaticFlag f = f `elem` flagsStaticNames + + +flagsStaticNames :: [String] +flagsStaticNames = [ "fdicts-strict", "fno-state-hack", "fno-opt-coercion", diff --git a/compiler/main/SysTools.lhs b/compiler/main/SysTools.lhs index 59261149843b..d43826a046f6 100644 --- a/compiler/main/SysTools.lhs +++ b/compiler/main/SysTools.lhs @@ -24,6 +24,8 @@ module SysTools ( figureLlvmVersion, readElfSection, + getLinkerInfo, + linkDynLib, askCc, @@ -596,13 +598,122 @@ figureLlvmVersion dflags = do text "Make sure you have installed LLVM"] return Nothing) return ver - + + +{- Note [Run-time linker info] + +See also: Trac #5240, Trac #6063 + +Before 'runLink', we need to be sure to get the relevant information +about the linker we're using at runtime to see if we need any extra +options. For example, GNU ld requires '--reduce-memory-overheads' and +'--hash-size=31' in order to use reasonable amounts of memory (see +trac #5240.) But this isn't supported in GNU gold. + +Generally, the linker changing from what was detected at ./configure +time has always been possible using -pgml, but on Linux it can happen +'transparently' by installing packages like binutils-gold, which +change what /usr/bin/ld actually points to. + +Clang vs GCC notes: + +For gcc, 'gcc -Wl,--version' gives a bunch of output about how to +invoke the linker before the version information string. For 'clang', +the version information for 'ld' is all that's output. For this +reason, we typically need to slurp up all of the standard error output +and look through it. + +Other notes: + +We cache the LinkerInfo inside DynFlags, since clients may link +multiple times. The definition of LinkerInfo is there to avoid a +circular dependency. + +-} + + +neededLinkArgs :: LinkerInfo -> [Option] +neededLinkArgs (GnuLD o) = o +neededLinkArgs (GnuGold o) = o +neededLinkArgs (DarwinLD o) = o +neededLinkArgs UnknownLD = [] + +-- Grab linker info and cache it in DynFlags. +getLinkerInfo :: DynFlags -> IO LinkerInfo +getLinkerInfo dflags = do + info <- readIORef (rtldFlags dflags) + case info of + Just v -> return v + Nothing -> do + v <- getLinkerInfo' dflags + writeIORef (rtldFlags dflags) (Just v) + return v + +-- See Note [Run-time linker info]. 
+getLinkerInfo' :: DynFlags -> IO LinkerInfo +getLinkerInfo' dflags = do + let platform = targetPlatform dflags + os = platformOS platform + (pgm,_) = pgm_l dflags + + -- Try to grab the info from the process output. + parseLinkerInfo stdo _stde _exitc + | any ("GNU ld" `isPrefixOf`) stdo = + -- GNU ld specifically needs to use less memory. This especially + -- hurts on small object files. Trac #5240. + return (GnuLD $ map Option ["-Wl,--hash-size=31", + "-Wl,--reduce-memory-overheads"]) + + | any ("GNU gold" `isPrefixOf`) stdo = + -- GNU gold does not require any special arguments. + return (GnuGold []) + + -- Unknown linker. + | otherwise = fail "invalid --version output, or linker is unsupported" + + -- Process the executable call + info <- catchIO (do + case os of + OSDarwin -> + -- Darwin has neither GNU Gold or GNU LD, but a strange linker + -- that doesn't support --version. We can just assume that's + -- what we're using. + return $ DarwinLD [] + OSMinGW32 -> + -- GHC doesn't support anything but GNU ld on Windows anyway. + -- Process creation is also fairly expensive on win32, so + -- we short-circuit here. + return $ GnuLD $ map Option ["-Wl,--hash-size=31", + "-Wl,--reduce-memory-overheads"] + _ -> do + -- In practice, we use the compiler as the linker here. Pass + -- -Wl,--version to get linker version info. + (exitc, stdo, stde) <- readProcessWithExitCode pgm + ["-Wl,--version"] "" + -- Split the output by lines to make certain kinds + -- of processing easier. In particular, 'clang' and 'gcc' + -- have slightly different outputs for '-Wl,--version', but + -- it's still easy to figure out. + parseLinkerInfo (lines stdo) (lines stde) exitc + ) + (\err -> do + debugTraceMsg dflags 2 + (text "Error (figuring out linker information):" <+> + text (show err)) + errorMsg dflags $ hang (text "Warning:") 9 $ + text "Couldn't figure out linker information!" $$ + text "Make sure you're using GNU ld, GNU gold" <+> + text "or the built in OS X linker, etc." 
+ return UnknownLD) + return info runLink :: DynFlags -> [Option] -> IO () runLink dflags args = do + -- See Note [Run-time linker info] + linkargs <- neededLinkArgs `fmap` getLinkerInfo dflags let (p,args0) = pgm_l dflags - args1 = map Option (getOpts dflags opt_l) - args2 = args0 ++ args1 ++ args + args1 = map Option (getOpts dflags opt_l) + args2 = args0 ++ args1 ++ args ++ linkargs mb_env <- getGccEnv args2 runSomethingFiltered dflags id "Linker" p args2 mb_env diff --git a/compiler/main/TidyPgm.lhs b/compiler/main/TidyPgm.lhs index b344590fe875..be4c68327653 100644 --- a/compiler/main/TidyPgm.lhs +++ b/compiler/main/TidyPgm.lhs @@ -822,7 +822,8 @@ dffvLetBndr vanilla_unfold id -- but I've seen cases where we had a wrapper id $w but a -- rhs where $w had been inlined; see Trac #3922 - go_unf (DFunUnfolding _ _ args) = mapM_ dffvExpr (dfunArgExprs args) + go_unf (DFunUnfolding { df_bndrs = bndrs, df_args = args }) + = extendScopeList bndrs $ mapM_ dffvExpr args go_unf _ = return () go_rule (BuiltinRule {}) = return () diff --git a/compiler/nativeGen/PPC/CodeGen.hs b/compiler/nativeGen/PPC/CodeGen.hs index b3f5a48a5d3b..39b64002dcc8 100644 --- a/compiler/nativeGen/PPC/CodeGen.hs +++ b/compiler/nativeGen/PPC/CodeGen.hs @@ -913,7 +913,7 @@ genCCall' _ _ (PrimTarget MO_Touch) _ _ = return $ nilOL genCCall' dflags gcp target dest_regs args0 - = ASSERT (not $ any (`elem` [II16]) $ map cmmTypeSize argReps) + = ASSERT(not $ any (`elem` [II16]) $ map cmmTypeSize argReps) -- we rely on argument promotion in the codeGen do (finalStack,passArgumentsCode,usedRegs) <- passArguments diff --git a/compiler/nativeGen/X86/CodeGen.hs b/compiler/nativeGen/X86/CodeGen.hs index ef8a628c1f63..848a804cf176 100644 --- a/compiler/nativeGen/X86/CodeGen.hs +++ b/compiler/nativeGen/X86/CodeGen.hs @@ -1834,7 +1834,7 @@ genCCall32' dflags target dest_regs args = do use_sse2 <- sse2Enabled push_codes <- mapM (push_arg use_sse2) (reverse prom_args) delta <- getDeltaNat - MASSERT (delta == delta0 - tot_arg_size) + MASSERT(delta == delta0 - tot_arg_size) -- deal with static vs dynamic call targets (callinsns,cconv) <- diff --git a/compiler/parser/Parser.y.pp b/compiler/parser/Parser.y.pp index c3ac00dd9dd8..1545aa27f433 100644 --- a/compiler/parser/Parser.y.pp +++ b/compiler/parser/Parser.y.pp @@ -26,8 +26,16 @@ -- CPP tricks because we want the directives in the output of the -- first CPP pass. +-- +-- Clang note, 6/17/2013 by aseipp: It is *extremely* important (for +-- some reason) that there be a line of whitespace between the two +-- definitions here, and the subsequent use of __IF_GHC_77__ - this +-- seems to be a bug in clang or something, where having the line of +-- whitespace will make the preprocessor correctly format the rendered +-- lines in the 'two step' CPP pass. No, this is not a joke. #define __IF_GHC_77__ #if __GLASGOW_HASKELL__ >= 707 -#define __ENDIF__ #endif +#define __ENDIF__ #endif + __IF_GHC_77__ -- Required on x86 to avoid the register allocator running out of -- stack slots when compiling this module with -fPIC -dynamic. 
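
Before moving on to the PrelNames changes, the getLinkerInfo code added to SysTools above is worth restating in isolation, since it is essentially a probe-once, cache-in-an-IORef pattern. The sketch below is a simplified stand-in, not the actual implementation: LinkerKind, classifyLinker and getLinkerKindCached are hypothetical names, the probe action stands in for running the C compiler with -Wl,--version and collecting its output, and only the "GNU ld"/"GNU gold" prefixes that the patch greps for are kept.

import Data.IORef
import Data.List (isPrefixOf)

-- A cut-down version of LinkerInfo: we only care which family of linker we found.
data LinkerKind = GnuLD | GnuGold | UnknownLD
  deriving (Eq, Show)

-- Classify a linker from the lines of its "--version" output,
-- using the same prefixes the patch looks for.
classifyLinker :: [String] -> LinkerKind
classifyLinker outLines
  | any ("GNU ld"   `isPrefixOf`) outLines = GnuLD
  | any ("GNU gold" `isPrefixOf`) outLines = GnuGold
  | otherwise                              = UnknownLD

-- Probe once, then serve the cached answer on every later call.
getLinkerKindCached :: IORef (Maybe LinkerKind) -> IO [String] -> IO LinkerKind
getLinkerKindCached cache probe = do
  cached <- readIORef cache
  case cached of
    Just k  -> return k                      -- already probed: reuse the answer
    Nothing -> do
      k <- fmap classifyLinker probe         -- first call: run the probe
      writeIORef cache (Just k)
      return k

main :: IO ()
main = do
  cache <- newIORef Nothing
  let fakeProbe = return ["GNU ld (GNU Binutils) 2.24"]   -- hypothetical probe output
  k1 <- getLinkerKindCached cache fakeProbe
  k2 <- getLinkerKindCached cache (error "probe should not run twice")
  print (k1, k2)   -- (GnuLD,GnuLD): the second call is served from the cache

The cache matters because the probe would otherwise spawn a process per link, and the patch notes that process creation is comparatively expensive, especially on Windows, where the real code short-circuits to GNU ld without probing at all.
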
diff --git a/compiler/prelude/PrelNames.lhs b/compiler/prelude/PrelNames.lhs index 09835fb34e08..2d795ab9c9c5 100644 --- a/compiler/prelude/PrelNames.lhs +++ b/compiler/prelude/PrelNames.lhs @@ -617,11 +617,12 @@ punc_RDR = dataQual_RDR lEX (fsLit "Punc") ident_RDR = dataQual_RDR lEX (fsLit "Ident") symbol_RDR = dataQual_RDR lEX (fsLit "Symbol") -step_RDR, alt_RDR, reset_RDR, prec_RDR :: RdrName +step_RDR, alt_RDR, reset_RDR, prec_RDR, pfail_RDR :: RdrName step_RDR = varQual_RDR rEAD_PREC (fsLit "step") alt_RDR = varQual_RDR rEAD_PREC (fsLit "+++") reset_RDR = varQual_RDR rEAD_PREC (fsLit "reset") prec_RDR = varQual_RDR rEAD_PREC (fsLit "prec") +pfail_RDR = varQual_RDR rEAD_PREC (fsLit "pfail") showList_RDR, showList___RDR, showsPrec_RDR, showString_RDR, showSpace_RDR, showParen_RDR :: RdrName @@ -1685,6 +1686,9 @@ checkDotnetResNameIdKey = mkPreludeMiscIdUnique 154 undefinedKey :: Unique undefinedKey = mkPreludeMiscIdUnique 155 + +magicSingIKey :: Unique +magicSingIKey = mkPreludeMiscIdUnique 156 \end{code} Certain class operations from Prelude classes. They get their own diff --git a/compiler/prelude/PrelRules.lhs b/compiler/prelude/PrelRules.lhs index 079ab0cc98bb..e9d0f6bc200c 100644 --- a/compiler/prelude/PrelRules.lhs +++ b/compiler/prelude/PrelRules.lhs @@ -20,17 +20,18 @@ module PrelRules ( primOpRules, builtinRules ) where #include "HsVersions.h" #include "../includes/MachDeps.h" -import {-# SOURCE #-} MkId ( mkPrimOpId ) +import {-# SOURCE #-} MkId ( mkPrimOpId, magicSingIId ) import CoreSyn import MkCore import Id +import Var (setVarType) import Literal import CoreSubst ( exprIsLiteral_maybe ) import PrimOp ( PrimOp(..), tagToEnumKey ) import TysWiredIn import TysPrim -import TyCon ( tyConDataCons_maybe, isEnumerationTyCon, isNewTyCon ) +import TyCon ( tyConDataCons_maybe, isEnumerationTyCon, isNewTyCon, unwrapNewTyCon_maybe ) import DataCon ( dataConTag, dataConTyCon, dataConWorkId ) import CoreUtils ( cheapEqExpr, exprIsHNF ) import CoreUnfold ( exprIsConApp_maybe ) @@ -46,6 +47,7 @@ import BasicTypes import DynFlags import Platform import Util +import Coercion (mkUnbranchedAxInstCo,mkSymCo) import Control.Monad import Data.Bits as Bits @@ -512,10 +514,10 @@ mkBasicRule op_name n_args rm = BuiltinRule { ru_name = occNameFS (nameOccName op_name), ru_fn = op_name, ru_nargs = n_args, - ru_try = \dflags _ -> runRuleM rm dflags } + ru_try = \ dflags in_scope _ -> runRuleM rm dflags in_scope } newtype RuleM r = RuleM - { runRuleM :: DynFlags -> IdUnfoldingFun -> [CoreExpr] -> Maybe r } + { runRuleM :: DynFlags -> InScopeEnv -> [CoreExpr] -> Maybe r } instance Monad RuleM where return x = RuleM $ \_ _ _ -> Just x @@ -557,8 +559,8 @@ removeOp32 = mzero getArgs :: RuleM [CoreExpr] getArgs = RuleM $ \_ _ args -> Just args -getIdUnfoldingFun :: RuleM IdUnfoldingFun -getIdUnfoldingFun = RuleM $ \_ iu _ -> Just iu +getInScopeEnv :: RuleM InScopeEnv +getInScopeEnv = RuleM $ \_ iu _ -> Just iu -- return the n-th argument of this rule, if it is a literal -- argument indices start from 0 @@ -719,7 +721,7 @@ tagToEnumRule = do let tag = fromInteger i correct_tag dc = (dataConTag dc - fIRST_TAG) == tag (dc:rest) <- return $ filter correct_tag (tyConDataCons_maybe tycon `orElse` []) - ASSERT (null rest) return () + ASSERT(null rest) return () return $ mkTyApps (Var (dataConWorkId dc)) tc_args -- See Note [tagToEnum#] @@ -745,8 +747,8 @@ dataToTagRule = a `mplus` b b = do dflags <- getDynFlags [_, val_arg] <- getArgs - id_unf <- getIdUnfoldingFun - (dc,_,_) <- liftMaybe $ exprIsConApp_maybe id_unf 
val_arg + in_scope <- getInScopeEnv + (dc,_,_) <- liftMaybe $ exprIsConApp_maybe in_scope val_arg ASSERT( not (isNewTyCon (dataConTyCon dc)) ) return () return $ mkIntVal dflags (toInteger (dataConTag dc - fIRST_TAG)) \end{code} @@ -812,11 +814,14 @@ builtinRules :: [CoreRule] builtinRules = [BuiltinRule { ru_name = fsLit "AppendLitString", ru_fn = unpackCStringFoldrName, - ru_nargs = 4, ru_try = \_ _ -> match_append_lit }, + ru_nargs = 4, ru_try = \_ _ _ -> match_append_lit }, BuiltinRule { ru_name = fsLit "EqString", ru_fn = eqStringName, - ru_nargs = 2, ru_try = \_ _ -> match_eq_string }, + ru_nargs = 2, ru_try = \_ _ _ -> match_eq_string }, BuiltinRule { ru_name = fsLit "Inline", ru_fn = inlineIdName, - ru_nargs = 2, ru_try = \_ _ -> match_inline }] + ru_nargs = 2, ru_try = \_ _ _ -> match_inline }, + BuiltinRule { ru_name = fsLit "MagicSingI", ru_fn = idName magicSingIId, + ru_nargs = 3, ru_try = \_ _ _ -> match_magicSingI } + ] ++ builtinIntegerRules builtinIntegerRules :: [CoreRule] @@ -929,8 +934,8 @@ builtinIntegerRules = -- unpackFoldrCString# "foo" c (unpackFoldrCString# "baz" c n) -- = unpackFoldrCString# "foobaz" c n -match_append_lit :: IdUnfoldingFun -> [Expr CoreBndr] -> Maybe (Expr CoreBndr) -match_append_lit _ [Type ty1, +match_append_lit :: [Expr CoreBndr] -> Maybe (Expr CoreBndr) +match_append_lit [Type ty1, Lit (MachStr s1), c1, Var unpk `App` Type ty2 @@ -946,20 +951,20 @@ match_append_lit _ [Type ty1, `App` c1 `App` n) -match_append_lit _ _ = Nothing +match_append_lit _ = Nothing --------------------------------------------------- -- The rule is this: -- eqString (unpackCString# (Lit s1)) (unpackCString# (Lit s2) = s1==s2 -match_eq_string :: IdUnfoldingFun -> [Expr CoreBndr] -> Maybe (Expr CoreBndr) -match_eq_string _ [Var unpk1 `App` Lit (MachStr s1), +match_eq_string :: [Expr CoreBndr] -> Maybe (Expr CoreBndr) +match_eq_string [Var unpk1 `App` Lit (MachStr s1), Var unpk2 `App` Lit (MachStr s2)] | unpk1 `hasKey` unpackCStringIdKey, unpk2 `hasKey` unpackCStringIdKey = Just (if s1 == s2 then trueVal else falseVal) -match_eq_string _ _ = Nothing +match_eq_string _ = Nothing --------------------------------------------------- @@ -975,14 +980,28 @@ match_eq_string _ _ = Nothing -- programmer can't avoid -- -- Also, don't forget about 'inline's type argument! -match_inline :: IdUnfoldingFun -> [Expr CoreBndr] -> Maybe (Expr CoreBndr) -match_inline _ (Type _ : e : _) +match_inline :: [Expr CoreBndr] -> Maybe (Expr CoreBndr) +match_inline (Type _ : e : _) | (Var f, args1) <- collectArgs e, Just unf <- maybeUnfoldingTemplate (realIdUnfolding f) -- Ignore the IdUnfoldingFun here! = Just (mkApps unf args1) -match_inline _ _ = Nothing +match_inline _ = Nothing + + +-- See Note [magicSingIId magic] in `basicTypes/MkId.lhs` +-- for a description of what is going on here. 
+match_magicSingI :: [Expr CoreBndr] -> Maybe (Expr CoreBndr) +match_magicSingI (Type t : e : Lam b _ : _) + | ((_ : _ : fu : _),_) <- splitFunTys t + , (sI_type,_) <- splitFunTy fu + , Just (sI_tc,xs) <- splitTyConApp_maybe sI_type + , Just (_,_,co) <- unwrapNewTyCon_maybe sI_tc + = Just $ let f = setVarType b fu + in Lam f $ Var f `App` Cast e (mkSymCo (mkUnbranchedAxInstCo co xs)) + +match_magicSingI _ = Nothing ------------------------------------------------- -- Integer rules @@ -990,26 +1009,18 @@ match_inline _ _ = Nothing -- wordToInteger (79::Word#) = 79::Integer -- Similarly Int64, Word64 -match_IntToInteger :: DynFlags - -> Id - -> IdUnfoldingFun - -> [Expr CoreBndr] - -> Maybe (Expr CoreBndr) -match_IntToInteger _ id id_unf [xl] +match_IntToInteger :: RuleFun +match_IntToInteger _ id_unf fn [xl] | Just (MachInt x) <- exprIsLiteral_maybe id_unf xl - = case idType id of + = case idType fn of FunTy _ integerTy -> Just (Lit (LitInteger x integerTy)) _ -> panic "match_IntToInteger: Id has the wrong type" match_IntToInteger _ _ _ _ = Nothing -match_WordToInteger :: DynFlags - -> Id - -> IdUnfoldingFun - -> [Expr CoreBndr] - -> Maybe (Expr CoreBndr) -match_WordToInteger _ id id_unf [xl] +match_WordToInteger :: RuleFun +match_WordToInteger _ id_unf id [xl] | Just (MachWord x) <- exprIsLiteral_maybe id_unf xl = case idType id of FunTy _ integerTy -> @@ -1018,12 +1029,8 @@ match_WordToInteger _ id id_unf [xl] panic "match_WordToInteger: Id has the wrong type" match_WordToInteger _ _ _ _ = Nothing -match_Int64ToInteger :: DynFlags - -> Id - -> IdUnfoldingFun - -> [Expr CoreBndr] - -> Maybe (Expr CoreBndr) -match_Int64ToInteger _ id id_unf [xl] +match_Int64ToInteger :: RuleFun +match_Int64ToInteger _ id_unf id [xl] | Just (MachInt64 x) <- exprIsLiteral_maybe id_unf xl = case idType id of FunTy _ integerTy -> @@ -1032,12 +1039,8 @@ match_Int64ToInteger _ id id_unf [xl] panic "match_Int64ToInteger: Id has the wrong type" match_Int64ToInteger _ _ _ _ = Nothing -match_Word64ToInteger :: DynFlags - -> Id - -> IdUnfoldingFun - -> [Expr CoreBndr] - -> Maybe (Expr CoreBndr) -match_Word64ToInteger _ id id_unf [xl] +match_Word64ToInteger :: RuleFun +match_Word64ToInteger _ id_unf id [xl] | Just (MachWord64 x) <- exprIsLiteral_maybe id_unf xl = case idType id of FunTy _ integerTy -> @@ -1049,47 +1052,29 @@ match_Word64ToInteger _ _ _ _ = Nothing ------------------------------------------------- match_Integer_convert :: Num a => (DynFlags -> a -> Expr CoreBndr) - -> DynFlags - -> Id - -> IdUnfoldingFun - -> [Expr CoreBndr] - -> Maybe (Expr CoreBndr) -match_Integer_convert convert dflags _ id_unf [xl] + -> RuleFun +match_Integer_convert convert dflags id_unf _ [xl] | Just (LitInteger x _) <- exprIsLiteral_maybe id_unf xl = Just (convert dflags (fromInteger x)) match_Integer_convert _ _ _ _ _ = Nothing -match_Integer_unop :: (Integer -> Integer) - -> DynFlags - -> Id - -> IdUnfoldingFun - -> [Expr CoreBndr] - -> Maybe (Expr CoreBndr) -match_Integer_unop unop _ _ id_unf [xl] +match_Integer_unop :: (Integer -> Integer) -> RuleFun +match_Integer_unop unop _ id_unf _ [xl] | Just (LitInteger x i) <- exprIsLiteral_maybe id_unf xl = Just (Lit (LitInteger (unop x) i)) match_Integer_unop _ _ _ _ _ = Nothing -match_Integer_binop :: (Integer -> Integer -> Integer) - -> DynFlags - -> Id - -> IdUnfoldingFun - -> [Expr CoreBndr] - -> Maybe (Expr CoreBndr) -match_Integer_binop binop _ _ id_unf [xl,yl] +match_Integer_binop :: (Integer -> Integer -> Integer) -> RuleFun +match_Integer_binop binop _ id_unf _ [xl,yl] | 
Just (LitInteger x i) <- exprIsLiteral_maybe id_unf xl , Just (LitInteger y _) <- exprIsLiteral_maybe id_unf yl = Just (Lit (LitInteger (x `binop` y) i)) match_Integer_binop _ _ _ _ _ = Nothing -- This helper is used for the quotRem and divMod functions -match_Integer_divop_both :: (Integer -> Integer -> (Integer, Integer)) - -> DynFlags - -> Id - -> IdUnfoldingFun - -> [Expr CoreBndr] - -> Maybe (Expr CoreBndr) -match_Integer_divop_both divop _ _ id_unf [xl,yl] +match_Integer_divop_both + :: (Integer -> Integer -> (Integer, Integer)) -> RuleFun +match_Integer_divop_both divop _ id_unf _ [xl,yl] | Just (LitInteger x t) <- exprIsLiteral_maybe id_unf xl , Just (LitInteger y _) <- exprIsLiteral_maybe id_unf yl , y /= 0 @@ -1102,50 +1087,30 @@ match_Integer_divop_both divop _ _ id_unf [xl,yl] match_Integer_divop_both _ _ _ _ _ = Nothing -- This helper is used for the quotRem and divMod functions -match_Integer_divop_one :: (Integer -> Integer -> Integer) - -> DynFlags - -> Id - -> IdUnfoldingFun - -> [Expr CoreBndr] - -> Maybe (Expr CoreBndr) -match_Integer_divop_one divop _ _ id_unf [xl,yl] +match_Integer_divop_one :: (Integer -> Integer -> Integer) -> RuleFun +match_Integer_divop_one divop _ id_unf _ [xl,yl] | Just (LitInteger x i) <- exprIsLiteral_maybe id_unf xl , Just (LitInteger y _) <- exprIsLiteral_maybe id_unf yl , y /= 0 = Just (Lit (LitInteger (x `divop` y) i)) match_Integer_divop_one _ _ _ _ _ = Nothing -match_Integer_Int_binop :: (Integer -> Int -> Integer) - -> DynFlags - -> Id - -> IdUnfoldingFun - -> [Expr CoreBndr] - -> Maybe (Expr CoreBndr) -match_Integer_Int_binop binop _ _ id_unf [xl,yl] +match_Integer_Int_binop :: (Integer -> Int -> Integer) -> RuleFun +match_Integer_Int_binop binop _ id_unf _ [xl,yl] | Just (LitInteger x i) <- exprIsLiteral_maybe id_unf xl , Just (MachInt y) <- exprIsLiteral_maybe id_unf yl = Just (Lit (LitInteger (x `binop` fromIntegral y) i)) match_Integer_Int_binop _ _ _ _ _ = Nothing -match_Integer_binop_Bool :: (Integer -> Integer -> Bool) - -> DynFlags - -> Id - -> IdUnfoldingFun - -> [Expr CoreBndr] - -> Maybe (Expr CoreBndr) -match_Integer_binop_Bool binop _ _ id_unf [xl, yl] +match_Integer_binop_Bool :: (Integer -> Integer -> Bool) -> RuleFun +match_Integer_binop_Bool binop _ id_unf _ [xl, yl] | Just (LitInteger x _) <- exprIsLiteral_maybe id_unf xl , Just (LitInteger y _) <- exprIsLiteral_maybe id_unf yl = Just (if x `binop` y then trueVal else falseVal) match_Integer_binop_Bool _ _ _ _ _ = Nothing -match_Integer_binop_Ordering :: (Integer -> Integer -> Ordering) - -> DynFlags - -> Id - -> IdUnfoldingFun - -> [Expr CoreBndr] - -> Maybe (Expr CoreBndr) -match_Integer_binop_Ordering binop _ _ id_unf [xl, yl] +match_Integer_binop_Ordering :: (Integer -> Integer -> Ordering) -> RuleFun +match_Integer_binop_Ordering binop _ id_unf _ [xl, yl] | Just (LitInteger x _) <- exprIsLiteral_maybe id_unf xl , Just (LitInteger y _) <- exprIsLiteral_maybe id_unf yl = Just $ case x `binop` y of @@ -1156,12 +1121,8 @@ match_Integer_binop_Ordering _ _ _ _ _ = Nothing match_Integer_Int_encodeFloat :: RealFloat a => (a -> Expr CoreBndr) - -> DynFlags - -> Id - -> IdUnfoldingFun - -> [Expr CoreBndr] - -> Maybe (Expr CoreBndr) -match_Integer_Int_encodeFloat mkLit _ _ id_unf [xl,yl] + -> RuleFun +match_Integer_Int_encodeFloat mkLit _ id_unf _ [xl,yl] | Just (LitInteger x _) <- exprIsLiteral_maybe id_unf xl , Just (MachInt y) <- exprIsLiteral_maybe id_unf yl = Just (mkLit $ encodeFloat x (fromInteger y)) @@ -1179,24 +1140,16 @@ match_Integer_Int_encodeFloat _ _ _ _ _ = 
Nothing -- NaN or +-Inf match_rationalTo :: RealFloat a => (a -> Expr CoreBndr) - -> DynFlags - -> Id - -> IdUnfoldingFun - -> [Expr CoreBndr] - -> Maybe (Expr CoreBndr) -match_rationalTo mkLit _ _ id_unf [xl, yl] + -> RuleFun +match_rationalTo mkLit _ id_unf _ [xl, yl] | Just (LitInteger x _) <- exprIsLiteral_maybe id_unf xl , Just (LitInteger y _) <- exprIsLiteral_maybe id_unf yl , y /= 0 = Just (mkLit (fromRational (x % y))) match_rationalTo _ _ _ _ _ = Nothing -match_decodeDouble :: DynFlags - -> Id - -> IdUnfoldingFun - -> [Expr CoreBndr] - -> Maybe (Expr CoreBndr) -match_decodeDouble _ fn id_unf [xl] +match_decodeDouble :: RuleFun +match_decodeDouble _ id_unf fn [xl] | Just (MachDouble x) <- exprIsLiteral_maybe id_unf xl = case idType fn of FunTy _ (TyConApp _ [integerTy, intHashTy]) -> @@ -1211,23 +1164,13 @@ match_decodeDouble _ fn id_unf [xl] panic "match_decodeDouble: Id has the wrong type" match_decodeDouble _ _ _ _ = Nothing -match_XToIntegerToX :: Name - -> DynFlags - -> Id - -> IdUnfoldingFun - -> [Expr CoreBndr] - -> Maybe (Expr CoreBndr) +match_XToIntegerToX :: Name -> RuleFun match_XToIntegerToX n _ _ _ [App (Var x) y] | idName x == n = Just y match_XToIntegerToX _ _ _ _ _ = Nothing -match_smallIntegerTo :: PrimOp - -> DynFlags - -> Id - -> IdUnfoldingFun - -> [Expr CoreBndr] - -> Maybe (Expr CoreBndr) +match_smallIntegerTo :: PrimOp -> RuleFun match_smallIntegerTo primOp _ _ _ [App (Var x) y] | idName x == smallIntegerName = Just $ App (Var (mkPrimOpId primOp)) y diff --git a/compiler/prelude/TysPrim.lhs b/compiler/prelude/TysPrim.lhs index bf8f62a910df..a10300a99cfe 100644 --- a/compiler/prelude/TysPrim.lhs +++ b/compiler/prelude/TysPrim.lhs @@ -346,7 +346,7 @@ mkPrimTyConName occ key tycon = mkWiredInName gHC_PRIM (mkTcOccFS occ) \begin{code} kindTyConType :: TyCon -> Type -kindTyConType kind = TyConApp kind [] +kindTyConType kind = TyConApp kind [] -- mkTyConApp isn't defined yet -- | See "Type#kind_subtyping" for details of the distinction between these 'Kind's anyKind, liftedTypeKind, unliftedTypeKind, openTypeKind, constraintKind, superKind :: Kind @@ -471,7 +471,7 @@ keep different state threads separate. It is represented by nothing at all. 
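The note above says that State# RealWorld is represented by nothing at all: it has VoidRep and exists only to sequence effects. As a minimal illustration (not part of this patch, and assuming the usual GHC.Types definition of IO as a newtype over a state-passing function), threading the token by hand looks like this:

\begin{code}
{-# LANGUAGE MagicHash, UnboxedTuples #-}
module StateTokenSketch where   -- hypothetical module, for illustration only

import GHC.Exts (newMutVar#, readMutVar#)
import GHC.IO   (IO (..))

-- Allocate a MutVar# and immediately read it back, threading the
-- State# RealWorld token explicitly.  The token carries no data; it only
-- forces the two primops into the right order.  Wrapping the state-passing
-- function in the IO constructor recovers an ordinary IO action.
allocAndRead :: a -> IO a
allocAndRead x = IO $ \ s0 ->
  case newMutVar# x s0 of
    (# s1, var #) -> readMutVar# var s1
\end{code}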
\begin{code} mkStatePrimTy :: Type -> Type -mkStatePrimTy ty = mkNakedTyConApp statePrimTyCon [ty] +mkStatePrimTy ty = TyConApp statePrimTyCon [ty] statePrimTyCon :: TyCon -- See Note [The State# TyCon] statePrimTyCon = pcPrimTyCon statePrimTyConName 1 VoidRep @@ -517,17 +517,17 @@ arrayArrayPrimTyCon = pcPrimTyCon0 arrayArrayPrimTyConName PtrRe mutableArrayArrayPrimTyCon = pcPrimTyCon mutableArrayArrayPrimTyConName 1 PtrRep mkArrayPrimTy :: Type -> Type -mkArrayPrimTy elt = mkNakedTyConApp arrayPrimTyCon [elt] +mkArrayPrimTy elt = TyConApp arrayPrimTyCon [elt] byteArrayPrimTy :: Type byteArrayPrimTy = mkTyConTy byteArrayPrimTyCon mkArrayArrayPrimTy :: Type mkArrayArrayPrimTy = mkTyConTy arrayArrayPrimTyCon mkMutableArrayPrimTy :: Type -> Type -> Type -mkMutableArrayPrimTy s elt = mkNakedTyConApp mutableArrayPrimTyCon [s, elt] +mkMutableArrayPrimTy s elt = TyConApp mutableArrayPrimTyCon [s, elt] mkMutableByteArrayPrimTy :: Type -> Type -mkMutableByteArrayPrimTy s = mkNakedTyConApp mutableByteArrayPrimTyCon [s] +mkMutableByteArrayPrimTy s = TyConApp mutableByteArrayPrimTyCon [s] mkMutableArrayArrayPrimTy :: Type -> Type -mkMutableArrayArrayPrimTy s = mkNakedTyConApp mutableArrayArrayPrimTyCon [s] +mkMutableArrayArrayPrimTy s = TyConApp mutableArrayArrayPrimTyCon [s] \end{code} %************************************************************************ @@ -541,7 +541,7 @@ mutVarPrimTyCon :: TyCon mutVarPrimTyCon = pcPrimTyCon mutVarPrimTyConName 2 PtrRep mkMutVarPrimTy :: Type -> Type -> Type -mkMutVarPrimTy s elt = mkNakedTyConApp mutVarPrimTyCon [s, elt] +mkMutVarPrimTy s elt = TyConApp mutVarPrimTyCon [s, elt] \end{code} %************************************************************************ @@ -555,7 +555,7 @@ mVarPrimTyCon :: TyCon mVarPrimTyCon = pcPrimTyCon mVarPrimTyConName 2 PtrRep mkMVarPrimTy :: Type -> Type -> Type -mkMVarPrimTy s elt = mkNakedTyConApp mVarPrimTyCon [s, elt] +mkMVarPrimTy s elt = TyConApp mVarPrimTyCon [s, elt] \end{code} %************************************************************************ @@ -569,7 +569,7 @@ tVarPrimTyCon :: TyCon tVarPrimTyCon = pcPrimTyCon tVarPrimTyConName 2 PtrRep mkTVarPrimTy :: Type -> Type -> Type -mkTVarPrimTy s elt = mkNakedTyConApp tVarPrimTyCon [s, elt] +mkTVarPrimTy s elt = TyConApp tVarPrimTyCon [s, elt] \end{code} %************************************************************************ @@ -583,7 +583,7 @@ stablePtrPrimTyCon :: TyCon stablePtrPrimTyCon = pcPrimTyCon stablePtrPrimTyConName 1 AddrRep mkStablePtrPrimTy :: Type -> Type -mkStablePtrPrimTy ty = mkNakedTyConApp stablePtrPrimTyCon [ty] +mkStablePtrPrimTy ty = TyConApp stablePtrPrimTyCon [ty] \end{code} %************************************************************************ @@ -597,7 +597,7 @@ stableNamePrimTyCon :: TyCon stableNamePrimTyCon = pcPrimTyCon stableNamePrimTyConName 1 PtrRep mkStableNamePrimTy :: Type -> Type -mkStableNamePrimTy ty = mkNakedTyConApp stableNamePrimTyCon [ty] +mkStableNamePrimTy ty = TyConApp stableNamePrimTyCon [ty] \end{code} %************************************************************************ @@ -624,7 +624,7 @@ weakPrimTyCon :: TyCon weakPrimTyCon = pcPrimTyCon weakPrimTyConName 1 PtrRep mkWeakPrimTy :: Type -> Type -mkWeakPrimTy v = mkNakedTyConApp weakPrimTyCon [v] +mkWeakPrimTy v = TyConApp weakPrimTyCon [v] \end{code} %************************************************************************ @@ -743,7 +743,7 @@ anyTyCon = mkSynTyCon anyTyConName kind [kKiVar] -} anyTypeOfKind :: Kind -> Type -anyTypeOfKind kind = 
mkNakedTyConApp anyTyCon [kind] +anyTypeOfKind kind = TyConApp anyTyCon [kind] \end{code} %************************************************************************ diff --git a/compiler/prelude/primops.txt.pp b/compiler/prelude/primops.txt.pp index 45472816c0e9..7203c1138983 100644 --- a/compiler/prelude/primops.txt.pp +++ b/compiler/prelude/primops.txt.pp @@ -1871,8 +1871,15 @@ has_side_effects = True out_of_line = True -primop MkWeakForeignEnvOp "mkWeakForeignEnv#" GenPrimOp - o -> b -> Addr# -> Addr# -> Int# -> Addr# -> State# RealWorld -> (# State# RealWorld, Weak# b #) +primop AddCFinalizerToWeakOp "addCFinalizerToWeak#" GenPrimOp + Addr# -> Addr# -> Int# -> Addr# -> Weak# b + -> State# RealWorld -> (# State# RealWorld, Int# #) + { {\tt addCFinalizerToWeak# fptr ptr flag eptr w} attaches a C + function pointer {\tt fptr} to a weak pointer {\tt w} as a finalizer. If + {\tt flag} is zero, {\tt fptr} will be called with one argument, + {\tt ptr}. Otherwise, it will be called with two arguments, + {\tt eptr} and {\tt ptr}. {\tt addCFinalizerToWeak#} returns + 1 on success, or 0 if {\tt w} is already dead. } with has_side_effects = True out_of_line = True diff --git a/compiler/rename/RnEnv.lhs b/compiler/rename/RnEnv.lhs index ae0fbb99b223..d3517ce52d61 100644 --- a/compiler/rename/RnEnv.lhs +++ b/compiler/rename/RnEnv.lhs @@ -815,7 +815,7 @@ lookupQualifiedName rdr_name | avail <- mi_exports iface, name <- availNames avail, nameOccName name == occ ] of - (n:ns) -> ASSERT (null ns) return (Just n) + (n:ns) -> ASSERT(null ns) return (Just n) _ -> do { traceRn (text "lookupQualified" <+> ppr rdr_name) ; return Nothing } diff --git a/compiler/rename/RnExpr.lhs b/compiler/rename/RnExpr.lhs index 29674ca34cf4..21f3bded95de 100644 --- a/compiler/rename/RnExpr.lhs +++ b/compiler/rename/RnExpr.lhs @@ -10,45 +10,38 @@ general, all of these functions return a renamed thing, and a set of free variables. \begin{code} -{-# OPTIONS -fno-warn-tabs #-} --- The above warning supression flag is a temporary kludge. --- While working on this module you are encouraged to remove it and --- detab the module (please do the detabbing in a separate patch). See --- http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces --- for details - module RnExpr ( - rnLExpr, rnExpr, rnStmts + rnLExpr, rnExpr, rnStmts ) where #include "HsVersions.h" #ifdef GHCI import {-# SOURCE #-} TcSplice( runQuasiQuoteExpr ) -#endif /* GHCI */ +#endif /* GHCI */ import RnSource ( rnSrcDecls, findSplice ) import RnBinds ( rnLocalBindsAndThen, rnLocalValBindsLHS, rnLocalValBindsRHS, - rnMatchGroup, rnGRHS, makeMiniFixityEnv) + rnMatchGroup, rnGRHS, makeMiniFixityEnv) import HsSyn import TcRnMonad -import TcEnv ( thRnBrack ) +import TcEnv ( thRnBrack ) import RnEnv import RnTypes import RnPat import DynFlags -import BasicTypes ( FixityDirection(..) ) +import BasicTypes ( FixityDirection(..) 
) import PrelNames import Module import Name import NameSet import RdrName -import LoadIface ( loadInterfaceForName ) +import LoadIface ( loadInterfaceForName ) import UniqSet import Data.List import Util -import ListSetOps ( removeDups ) +import ListSetOps ( removeDups ) import Outputable import SrcLoc import FastString @@ -67,9 +60,9 @@ thenM_ = (>>) \end{code} %************************************************************************ -%* * +%* * \subsubsection{Expressions} -%* * +%* * %************************************************************************ \begin{code} @@ -78,18 +71,18 @@ rnExprs ls = rnExprs' ls emptyUniqSet where rnExprs' [] acc = return ([], acc) rnExprs' (expr:exprs) acc - = rnLExpr expr `thenM` \ (expr', fvExpr) -> + = rnLExpr expr `thenM` \ (expr', fvExpr) -> - -- Now we do a "seq" on the free vars because typically it's small - -- or empty, especially in very long lists of constants + -- Now we do a "seq" on the free vars because typically it's small + -- or empty, especially in very long lists of constants let - acc' = acc `plusFV` fvExpr + acc' = acc `plusFV` fvExpr in acc' `seq` rnExprs' exprs acc' `thenM` \ (exprs', fvExprs) -> return (expr':exprs', fvExprs) \end{code} -Variables. We look up the variable and return the resulting name. +Variables. We look up the variable and return the resulting name. \begin{code} rnLExpr :: LHsExpr RdrName -> RnM (LHsExpr Name, FreeVars) @@ -101,12 +94,12 @@ finishHsVar :: Name -> RnM (HsExpr Name, FreeVars) -- Separated from rnExpr because it's also used -- when renaming infix expressions -- See Note [Adding the implicit parameter to 'assert'] -finishHsVar name +finishHsVar name = do { ignore_asserts <- goptM Opt_IgnoreAsserts ; if ignore_asserts || not (name `hasKey` assertIdKey) - then return (HsVar name, unitFV name) - else do { e <- mkAssertErrorExpr - ; return (e, unitFV name) } } + then return (HsVar name, unitFV name) + else do { e <- mkAssertErrorExpr + ; return (e, unitFV name) } } rnExpr (HsVar v) = do { mb_name <- lookupOccRn_maybe v @@ -115,11 +108,11 @@ rnExpr (HsVar v) ; if opt_TypeHoles && startsWithUnderscore (rdrNameOcc v) then return (HsUnboundVar v, emptyFVs) else do { n <- reportUnboundName v; finishHsVar n } } ; - Just name + Just name | name == nilDataConName -- Treat [] as an ExplicitList, so that -- OverloadedLists works correctly -> rnExpr (ExplicitList placeHolderType Nothing []) - | otherwise + | otherwise -> finishHsVar name } } rnExpr (HsIPVar v) @@ -130,48 +123,48 @@ rnExpr (HsLit lit@(HsString s)) opt_OverloadedStrings <- xoptM Opt_OverloadedStrings ; if opt_OverloadedStrings then rnExpr (HsOverLit (mkHsIsString s placeHolderType)) - else -- Same as below - rnLit lit `thenM_` + else -- Same as below + rnLit lit `thenM_` return (HsLit lit, emptyFVs) } -rnExpr (HsLit lit) - = rnLit lit `thenM_` +rnExpr (HsLit lit) + = rnLit lit `thenM_` return (HsLit lit, emptyFVs) -rnExpr (HsOverLit lit) - = rnOverLit lit `thenM` \ (lit', fvs) -> +rnExpr (HsOverLit lit) + = rnOverLit lit `thenM` \ (lit', fvs) -> return (HsOverLit lit', fvs) rnExpr (HsApp fun arg) - = rnLExpr fun `thenM` \ (fun',fvFun) -> - rnLExpr arg `thenM` \ (arg',fvArg) -> + = rnLExpr fun `thenM` \ (fun',fvFun) -> + rnLExpr arg `thenM` \ (arg',fvArg) -> return (HsApp fun' arg', fvFun `plusFV` fvArg) rnExpr (OpApp e1 (L op_loc (HsVar op_rdr)) _ e2) = do { (e1', fv_e1) <- rnLExpr e1 - ; (e2', fv_e2) <- rnLExpr e2 - ; op_name <- setSrcSpan op_loc (lookupOccRn op_rdr) - ; (op', fv_op) <- finishHsVar op_name - -- NB: op' is usually just a 
variable, but might be - -- an applicatoin (assert "Foo.hs:47") - -- Deal with fixity - -- When renaming code synthesised from "deriving" declarations - -- we used to avoid fixity stuff, but we can't easily tell any - -- more, so I've removed the test. Adding HsPars in TcGenDeriv - -- should prevent bad things happening. - ; fixity <- lookupFixityRn op_name - ; final_e <- mkOpAppRn e1' (L op_loc op') fixity e2' - ; return (final_e, fv_e1 `plusFV` fv_op `plusFV` fv_e2) } + ; (e2', fv_e2) <- rnLExpr e2 + ; op_name <- setSrcSpan op_loc (lookupOccRn op_rdr) + ; (op', fv_op) <- finishHsVar op_name + -- NB: op' is usually just a variable, but might be + -- an applicatoin (assert "Foo.hs:47") + -- Deal with fixity + -- When renaming code synthesised from "deriving" declarations + -- we used to avoid fixity stuff, but we can't easily tell any + -- more, so I've removed the test. Adding HsPars in TcGenDeriv + -- should prevent bad things happening. + ; fixity <- lookupFixityRn op_name + ; final_e <- mkOpAppRn e1' (L op_loc op') fixity e2' + ; return (final_e, fv_e1 `plusFV` fv_op `plusFV` fv_e2) } rnExpr (OpApp _ other_op _ _) = failWith (vcat [ hang (ptext (sLit "Infix application with a non-variable operator:")) 2 (ppr other_op) , ptext (sLit "(Probably resulting from a Template Haskell splice)") ]) rnExpr (NegApp e _) - = rnLExpr e `thenM` \ (e', fv_e) -> - lookupSyntaxName negateName `thenM` \ (neg_name, fv_neg) -> - mkNegAppRn e' neg_name `thenM` \ final_e -> + = rnLExpr e `thenM` \ (e', fv_e) -> + lookupSyntaxName negateName `thenM` \ (neg_name, fv_neg) -> + mkNegAppRn e' neg_name `thenM` \ final_e -> return (final_e, fv_e `plusFV` fv_neg) ------------------------------------------ @@ -189,36 +182,36 @@ rnExpr e@(HsBracket br_body) return (HsBracket body', fvs_e) rnExpr (HsSpliceE splice) - = rnSplice splice `thenM` \ (splice', fvs) -> + = rnSplice splice `thenM` \ (splice', fvs) -> return (HsSpliceE splice', fvs) #ifndef GHCI rnExpr e@(HsQuasiQuoteE _) = pprPanic "Cant do quasiquotation without GHCi" (ppr e) #else rnExpr (HsQuasiQuoteE qq) - = runQuasiQuoteExpr qq `thenM` \ (L _ expr') -> + = runQuasiQuoteExpr qq `thenM` \ (L _ expr') -> rnExpr expr' -#endif /* GHCI */ +#endif /* GHCI */ --------------------------------------------- --- Sections +-- Sections -- See Note [Parsing sections] in Parser.y.pp rnExpr (HsPar (L loc (section@(SectionL {})))) - = do { (section', fvs) <- rnSection section - ; return (HsPar (L loc section'), fvs) } + = do { (section', fvs) <- rnSection section + ; return (HsPar (L loc section'), fvs) } rnExpr (HsPar (L loc (section@(SectionR {})))) - = do { (section', fvs) <- rnSection section - ; return (HsPar (L loc section'), fvs) } + = do { (section', fvs) <- rnSection section + ; return (HsPar (L loc section'), fvs) } rnExpr (HsPar e) - = do { (e', fvs_e) <- rnLExpr e - ; return (HsPar e', fvs_e) } + = do { (e', fvs_e) <- rnLExpr e + ; return (HsPar e', fvs_e) } rnExpr expr@(SectionL {}) - = do { addErr (sectionErr expr); rnSection expr } + = do { addErr (sectionErr expr); rnSection expr } rnExpr expr@(SectionR {}) - = do { addErr (sectionErr expr); rnSection expr } + = do { addErr (sectionErr expr); rnSection expr } --------------------------------------------- rnExpr (HsCoreAnn ann expr) @@ -226,10 +219,10 @@ rnExpr (HsCoreAnn ann expr) return (HsCoreAnn ann expr', fvs_expr) rnExpr (HsSCC lbl expr) - = rnLExpr expr `thenM` \ (expr', fvs_expr) -> + = rnLExpr expr `thenM` \ (expr', fvs_expr) -> return (HsSCC lbl expr', fvs_expr) rnExpr (HsTickPragma info expr) - = 
rnLExpr expr `thenM` \ (expr', fvs_expr) -> + = rnLExpr expr `thenM` \ (expr', fvs_expr) -> return (HsTickPragma info expr', fvs_expr) rnExpr (HsLam matches) @@ -237,7 +230,7 @@ rnExpr (HsLam matches) return (HsLam matches', fvMatch) rnExpr (HsLamCase arg matches) - = rnMatchGroup CaseAlt rnLExpr matches `thenM` \ (matches', fvs_ms) -> + = rnMatchGroup CaseAlt rnLExpr matches `thenM` \ (matches', fvs_ms) -> return (HsLamCase arg matches', fvs_ms) rnExpr (HsCase expr matches) @@ -246,26 +239,26 @@ rnExpr (HsCase expr matches) return (HsCase new_expr new_matches, e_fvs `plusFV` ms_fvs) rnExpr (HsLet binds expr) - = rnLocalBindsAndThen binds $ \ binds' -> - rnLExpr expr `thenM` \ (expr',fvExpr) -> + = rnLocalBindsAndThen binds $ \ binds' -> + rnLExpr expr `thenM` \ (expr',fvExpr) -> return (HsLet binds' expr', fvExpr) rnExpr (HsDo do_or_lc stmts _) - = do { ((stmts', _), fvs) <- rnStmts do_or_lc rnLExpr stmts (\ _ -> return ((), emptyFVs)) - ; return ( HsDo do_or_lc stmts' placeHolderType, fvs ) } + = do { ((stmts', _), fvs) <- rnStmts do_or_lc rnLExpr stmts (\ _ -> return ((), emptyFVs)) + ; return ( HsDo do_or_lc stmts' placeHolderType, fvs ) } rnExpr (ExplicitList _ _ exps) = do { opt_OverloadedLists <- xoptM Opt_OverloadedLists ; (exps', fvs) <- rnExprs exps - ; if opt_OverloadedLists + ; if opt_OverloadedLists then do { - ; (from_list_n_name, fvs') <- lookupSyntaxName fromListNName - ; return (ExplicitList placeHolderType (Just from_list_n_name) exps', fvs `plusFV` fvs') } + ; (from_list_n_name, fvs') <- lookupSyntaxName fromListNName + ; return (ExplicitList placeHolderType (Just from_list_n_name) exps', fvs `plusFV` fvs') } else return (ExplicitList placeHolderType Nothing exps', fvs) } rnExpr (ExplicitPArr _ exps) - = rnExprs exps `thenM` \ (exps', fvs) -> + = rnExprs exps `thenM` \ (exps', fvs) -> return (ExplicitPArr placeHolderType exps', fvs) rnExpr (ExplicitTuple tup_args boxity) @@ -278,22 +271,22 @@ rnExpr (ExplicitTuple tup_args boxity) rnTupArg (Missing _) = return (Missing placeHolderType, emptyFVs) rnExpr (RecordCon con_id _ rbinds) - = do { conname <- lookupLocatedOccRn con_id - ; (rbinds', fvRbinds) <- rnHsRecBinds (HsRecFieldCon (unLoc conname)) rbinds - ; return (RecordCon conname noPostTcExpr rbinds', - fvRbinds `addOneFV` unLoc conname) } + = do { conname <- lookupLocatedOccRn con_id + ; (rbinds', fvRbinds) <- rnHsRecBinds (HsRecFieldCon (unLoc conname)) rbinds + ; return (RecordCon conname noPostTcExpr rbinds', + fvRbinds `addOneFV` unLoc conname) } rnExpr (RecordUpd expr rbinds _ _ _) - = do { (expr', fvExpr) <- rnLExpr expr - ; (rbinds', fvRbinds) <- rnHsRecBinds HsRecFieldUpd rbinds - ; return (RecordUpd expr' rbinds' [] [] [], - fvExpr `plusFV` fvRbinds) } + = do { (expr', fvExpr) <- rnLExpr expr + ; (rbinds', fvRbinds) <- rnHsRecBinds HsRecFieldUpd rbinds + ; return (RecordUpd expr' rbinds' [] [] [], + fvExpr `plusFV` fvRbinds) } rnExpr (ExprWithTySig expr pty) - = do { (pty', fvTy) <- rnLHsType ExprWithTySigCtx pty - ; (expr', fvExpr) <- bindSigTyVarsFV (hsExplicitTvs pty') $ - rnLExpr expr - ; return (ExprWithTySig expr' pty', fvExpr `plusFV` fvTy) } + = do { (pty', fvTy) <- rnLHsType ExprWithTySigCtx pty + ; (expr', fvExpr) <- bindSigTyVarsFV (hsExplicitTvs pty') $ + rnLExpr expr + ; return (ExprWithTySig expr' pty', fvExpr `plusFV` fvTy) } rnExpr (HsIf _ p b1 b2) = do { (p', fvP) <- rnLExpr p @@ -307,21 +300,21 @@ rnExpr (HsMultiIf ty alts) ; return (HsMultiIf ty alts', fvs) } rnExpr (HsType a) - = rnLHsType HsTypeCtx a `thenM` \ (t, fvT) -> + = rnLHsType 
HsTypeCtx a `thenM` \ (t, fvT) -> return (HsType t, fvT) rnExpr (ArithSeq _ _ seq) = do { opt_OverloadedLists <- xoptM Opt_OverloadedLists ; (new_seq, fvs) <- rnArithSeq seq - ; if opt_OverloadedLists + ; if opt_OverloadedLists then do { - ; (from_list_name, fvs') <- lookupSyntaxName fromListName - ; return (ArithSeq noPostTcExpr (Just from_list_name) new_seq, fvs `plusFV` fvs') } + ; (from_list_name, fvs') <- lookupSyntaxName fromListName + ; return (ArithSeq noPostTcExpr (Just from_list_name) new_seq, fvs `plusFV` fvs') } else return (ArithSeq noPostTcExpr Nothing new_seq, fvs) } rnExpr (PArrSeq _ seq) - = rnArithSeq seq `thenM` \ (new_seq, fvs) -> + = rnArithSeq seq `thenM` \ (new_seq, fvs) -> return (PArrSeq noPostTcExpr new_seq, fvs) \end{code} @@ -341,16 +334,16 @@ rnExpr e@(ELazyPat {}) = patSynErr e \end{code} %************************************************************************ -%* * - Arrow notation -%* * +%* * + Arrow notation +%* * %************************************************************************ \begin{code} rnExpr (HsProc pat body) = newArrowScope $ rnPat ProcExpr pat $ \ pat' -> - rnCmdTop body `thenM` \ (body',fvBody) -> + rnCmdTop body `thenM` \ (body',fvBody) -> return (HsProc pat' body', fvBody) -- Ideally, these would be done in parsing, but to keep parsing simple, we do it here. @@ -358,7 +351,7 @@ rnExpr e@(HsArrApp {}) = arrowFail e rnExpr e@(HsArrForm {}) = arrowFail e rnExpr other = pprPanic "rnExpr: unexpected expression" (ppr other) - -- HsWrap + -- HsWrap hsHoleExpr :: HsExpr Name hsHoleExpr = HsUnboundVar (mkRdrUnqual (mkVarOcc "_")) @@ -375,24 +368,24 @@ arrowFail e -- See Note [Parsing sections] in Parser.y.pp rnSection :: HsExpr RdrName -> RnM (HsExpr Name, FreeVars) rnSection section@(SectionR op expr) - = do { (op', fvs_op) <- rnLExpr op - ; (expr', fvs_expr) <- rnLExpr expr - ; checkSectionPrec InfixR section op' expr' - ; return (SectionR op' expr', fvs_op `plusFV` fvs_expr) } + = do { (op', fvs_op) <- rnLExpr op + ; (expr', fvs_expr) <- rnLExpr expr + ; checkSectionPrec InfixR section op' expr' + ; return (SectionR op' expr', fvs_op `plusFV` fvs_expr) } rnSection section@(SectionL expr op) - = do { (expr', fvs_expr) <- rnLExpr expr - ; (op', fvs_op) <- rnLExpr op - ; checkSectionPrec InfixL section op' expr' - ; return (SectionL expr' op', fvs_op `plusFV` fvs_expr) } + = do { (expr', fvs_expr) <- rnLExpr expr + ; (op', fvs_op) <- rnLExpr op + ; checkSectionPrec InfixL section op' expr' + ; return (SectionL expr' op', fvs_op `plusFV` fvs_expr) } rnSection other = pprPanic "rnSection" (ppr other) \end{code} %************************************************************************ -%* * - Records -%* * +%* * + Records +%* * %************************************************************************ \begin{code} @@ -401,40 +394,40 @@ rnHsRecBinds :: HsRecFieldContext -> HsRecordBinds RdrName rnHsRecBinds ctxt rec_binds@(HsRecFields { rec_dotdot = dd }) = do { (flds, fvs) <- rnHsRecFields1 ctxt HsVar rec_binds ; (flds', fvss) <- mapAndUnzipM rn_field flds - ; return (HsRecFields { rec_flds = flds', rec_dotdot = dd }, + ; return (HsRecFields { rec_flds = flds', rec_dotdot = dd }, fvs `plusFV` plusFVs fvss) } - where + where rn_field fld = do { (arg', fvs) <- rnLExpr (hsRecFieldArg fld) ; return (fld { hsRecFieldArg = arg' }, fvs) } \end{code} %************************************************************************ -%* * - Arrow commands -%* * +%* * + Arrow commands +%* * %************************************************************************ 
\begin{code} rnCmdArgs :: [LHsCmdTop RdrName] -> RnM ([LHsCmdTop Name], FreeVars) rnCmdArgs [] = return ([], emptyFVs) rnCmdArgs (arg:args) - = rnCmdTop arg `thenM` \ (arg',fvArg) -> - rnCmdArgs args `thenM` \ (args',fvArgs) -> + = rnCmdTop arg `thenM` \ (arg',fvArg) -> + rnCmdArgs args `thenM` \ (args',fvArgs) -> return (arg':args', fvArg `plusFV` fvArgs) rnCmdTop :: LHsCmdTop RdrName -> RnM (LHsCmdTop Name, FreeVars) rnCmdTop = wrapLocFstM rnCmdTop' where - rnCmdTop' (HsCmdTop cmd _ _ _) + rnCmdTop' (HsCmdTop cmd _ _ _) = do { (cmd', fvCmd) <- rnLCmd cmd ; let cmd_names = [arrAName, composeAName, firstAName] ++ - nameSetToList (methodNamesCmd (unLoc cmd')) - -- Generate the rebindable syntax for the monad + nameSetToList (methodNamesCmd (unLoc cmd')) + -- Generate the rebindable syntax for the monad ; (cmd_names', cmd_fvs) <- lookupSyntaxNames cmd_names - ; return (HsCmdTop cmd' placeHolderType placeHolderType (cmd_names `zip` cmd_names'), - fvCmd `plusFV` cmd_fvs) } + ; return (HsCmdTop cmd' placeHolderType placeHolderType (cmd_names `zip` cmd_names'), + fvCmd `plusFV` cmd_fvs) } rnLCmd :: LHsCmd RdrName -> RnM (LHsCmd Name, FreeVars) rnLCmd = wrapLocFstM rnCmd @@ -451,10 +444,10 @@ rnCmd (HsCmdArrApp arrow arg _ ho rtl) HsHigherOrderApp -> tc HsFirstOrderApp -> escapeArrowScope tc -- See Note [Escaping the arrow scope] in TcRnTypes - -- Before renaming 'arrow', use the environment of the enclosing - -- proc for the (-<) case. - -- Local bindings, inside the enclosing proc, are not in scope - -- inside 'arrow'. In the higher-order case (-<<), they are. + -- Before renaming 'arrow', use the environment of the enclosing + -- proc for the (-<) case. + -- Local bindings, inside the enclosing proc, are not in scope + -- inside 'arrow'. In the higher-order case (-<<), they are. -- infix form rnCmd (HsCmdArrForm op (Just _) [arg1, arg2]) @@ -467,7 +460,7 @@ rnCmd (HsCmdArrForm op (Just _) [arg1, arg2]) -- Deal with fixity lookupFixityRn op_name `thenM` \ fixity -> - mkOpFormRn arg1' op' fixity arg2' `thenM` \ final_e -> + mkOpFormRn arg1' op' fixity arg2' `thenM` \ final_e -> return (final_e, fv_arg1 `plusFV` fv_op `plusFV` fv_arg2) @@ -514,8 +507,8 @@ rnCmd (HsCmdDo stmts _) rnCmd cmd@(HsCmdCast {}) = pprPanic "rnCmd" (ppr cmd) --------------------------------------------------- -type CmdNeeds = FreeVars -- Only inhabitants are - -- appAName, choiceAName, loopAName +type CmdNeeds = FreeVars -- Only inhabitants are + -- appAName, choiceAName, loopAName -- find what methods the Cmd needs (loop, choice, apply) methodNamesLCmd :: LHsCmd Name -> CmdNeeds @@ -536,7 +529,7 @@ methodNamesCmd (HsCmdIf _ _ c1 c2) = methodNamesLCmd c1 `plusFV` methodNamesLCmd c2 `addOneFV` choiceAName methodNamesCmd (HsCmdLet _ c) = methodNamesLCmd c -methodNamesCmd (HsCmdDo stmts _) = methodNamesStmts stmts +methodNamesCmd (HsCmdDo stmts _) = methodNamesStmts stmts methodNamesCmd (HsCmdApp c _) = methodNamesLCmd c methodNamesCmd (HsCmdLam match) = methodNamesMatch match @@ -544,7 +537,7 @@ methodNamesCmd (HsCmdCase _ matches) = methodNamesMatch matches `addOneFV` choiceAName --methodNamesCmd _ = emptyFVs - -- Other forms can't occur in commands, but it's not convenient + -- Other forms can't occur in commands, but it's not convenient -- to error here so we just do what's convenient. 
-- The type checker will complain later @@ -552,7 +545,7 @@ methodNamesCmd (HsCmdCase _ matches) methodNamesMatch :: MatchGroup Name (LHsCmd Name) -> FreeVars methodNamesMatch (MG { mg_alts = ms }) = plusFVs (map do_one ms) - where + where do_one (L _ (Match _ _ grhss)) = methodNamesGRHSs grhss ------------------------------------------------- @@ -581,107 +574,107 @@ methodNamesStmt (RecStmt { recS_stmts = stmts }) = methodNamesStmts stmts `addOn methodNamesStmt (LetStmt {}) = emptyFVs methodNamesStmt (ParStmt {}) = emptyFVs methodNamesStmt (TransStmt {}) = emptyFVs - -- ParStmt and TransStmt can't occur in commands, but it's not convenient to error + -- ParStmt and TransStmt can't occur in commands, but it's not convenient to error -- here so we just do what's convenient \end{code} %************************************************************************ -%* * - Arithmetic sequences -%* * +%* * + Arithmetic sequences +%* * %************************************************************************ \begin{code} rnArithSeq :: ArithSeqInfo RdrName -> RnM (ArithSeqInfo Name, FreeVars) rnArithSeq (From expr) - = rnLExpr expr `thenM` \ (expr', fvExpr) -> + = rnLExpr expr `thenM` \ (expr', fvExpr) -> return (From expr', fvExpr) rnArithSeq (FromThen expr1 expr2) - = rnLExpr expr1 `thenM` \ (expr1', fvExpr1) -> - rnLExpr expr2 `thenM` \ (expr2', fvExpr2) -> + = rnLExpr expr1 `thenM` \ (expr1', fvExpr1) -> + rnLExpr expr2 `thenM` \ (expr2', fvExpr2) -> return (FromThen expr1' expr2', fvExpr1 `plusFV` fvExpr2) rnArithSeq (FromTo expr1 expr2) - = rnLExpr expr1 `thenM` \ (expr1', fvExpr1) -> - rnLExpr expr2 `thenM` \ (expr2', fvExpr2) -> + = rnLExpr expr1 `thenM` \ (expr1', fvExpr1) -> + rnLExpr expr2 `thenM` \ (expr2', fvExpr2) -> return (FromTo expr1' expr2', fvExpr1 `plusFV` fvExpr2) rnArithSeq (FromThenTo expr1 expr2 expr3) - = rnLExpr expr1 `thenM` \ (expr1', fvExpr1) -> - rnLExpr expr2 `thenM` \ (expr2', fvExpr2) -> - rnLExpr expr3 `thenM` \ (expr3', fvExpr3) -> + = rnLExpr expr1 `thenM` \ (expr1', fvExpr1) -> + rnLExpr expr2 `thenM` \ (expr2', fvExpr2) -> + rnLExpr expr3 `thenM` \ (expr3', fvExpr3) -> return (FromThenTo expr1' expr2' expr3', - plusFVs [fvExpr1, fvExpr2, fvExpr3]) + plusFVs [fvExpr1, fvExpr2, fvExpr3]) \end{code} %************************************************************************ -%* * - Template Haskell brackets -%* * +%* * + Template Haskell brackets +%* * %************************************************************************ \begin{code} rnBracket :: HsBracket RdrName -> RnM (HsBracket Name, FreeVars) -rnBracket (VarBr flg n) +rnBracket (VarBr flg n) = do { name <- lookupOccRn n ; this_mod <- getModule ; unless (nameIsLocalOrFrom this_mod name) $ -- Reason: deprecation checking assumes do { _ <- loadInterfaceForName msg name -- the home interface is loaded, and - ; return () } -- this is the only way that is going - -- to happen + ; return () } -- this is the only way that is going + -- to happen ; return (VarBr flg name, unitFV name) } where msg = ptext (sLit "Need interface for Template Haskell quoted Name") rnBracket (ExpBr e) = do { (e', fvs) <- rnLExpr e - ; return (ExpBr e', fvs) } + ; return (ExpBr e', fvs) } rnBracket (PatBr p) = rnPat ThPatQuote p $ \ p' -> return (PatBr p', emptyFVs) rnBracket (TypBr t) = do { (t', fvs) <- rnLHsType TypBrCtx t - ; return (TypBr t', fvs) } + ; return (TypBr t', fvs) } -rnBracket (DecBrL decls) +rnBracket (DecBrL decls) = do { (group, mb_splice) <- findSplice decls ; case mb_splice of Nothing -> return () - Just (SpliceDecl (L 
loc _) _, _) + Just (SpliceDecl (L loc _) _, _) -> setSrcSpan loc $ addErr (ptext (sLit "Declaration splices are not permitted inside declaration brackets")) - -- Why not? See Section 7.3 of the TH paper. + -- Why not? See Section 7.3 of the TH paper. ; gbl_env <- getGblEnv ; let new_gbl_env = gbl_env { tcg_dus = emptyDUs } - -- The emptyDUs is so that we just collect uses for this + -- The emptyDUs is so that we just collect uses for this -- group alone in the call to rnSrcDecls below - ; (tcg_env, group') <- setGblEnv new_gbl_env $ - setStage thRnBrack $ - rnSrcDecls [] group + ; (tcg_env, group') <- setGblEnv new_gbl_env $ + setStage thRnBrack $ + rnSrcDecls [] group -- The empty list is for extra dependencies coming from .hs-boot files -- See Note [Extra dependencies from .hs-boot files] in RnSource - -- Discard the tcg_env; it contains only extra info about fixity - ; traceRn (text "rnBracket dec" <+> (ppr (tcg_dus tcg_env) $$ + -- Discard the tcg_env; it contains only extra info about fixity + ; traceRn (text "rnBracket dec" <+> (ppr (tcg_dus tcg_env) $$ ppr (duUses (tcg_dus tcg_env)))) - ; return (DecBrG group', duUses (tcg_dus tcg_env)) } + ; return (DecBrG group', duUses (tcg_dus tcg_env)) } rnBracket (DecBrG _) = panic "rnBracket: unexpected DecBrG" \end{code} %************************************************************************ -%* * +%* * \subsubsection{@Stmt@s: in @do@ expressions} -%* * +%* * %************************************************************************ \begin{code} -rnStmts :: Outputable (body RdrName) => HsStmtContext Name +rnStmts :: Outputable (body RdrName) => HsStmtContext Name -> (Located (body RdrName) -> RnM (Located (body Name), FreeVars)) -> [LStmt RdrName (Located (body RdrName))] -> ([Name] -> RnM (thing, FreeVars)) - -> RnM (([LStmt Name (Located (body Name))], thing), FreeVars) + -> RnM (([LStmt Name (Located (body Name))], thing), FreeVars) -- Variables bound by the Stmts, and mentioned in thing_inside, -- do not appear in the result FreeVars @@ -692,11 +685,11 @@ rnStmts ctxt _ [] thing_inside rnStmts MDoExpr rnBody stmts thing_inside -- Deal with mdo = -- Behave like do { rec { ...all but last... 
}; last } - do { ((stmts1, (stmts2, thing)), fvs) - <- rnStmt MDoExpr rnBody (noLoc $ mkRecStmt all_but_last) $ \ _ -> - do { last_stmt' <- checkLastStmt MDoExpr last_stmt - ; rnStmt MDoExpr rnBody last_stmt' thing_inside } - ; return (((stmts1 ++ stmts2), thing), fvs) } + do { ((stmts1, (stmts2, thing)), fvs) + <- rnStmt MDoExpr rnBody (noLoc $ mkRecStmt all_but_last) $ \ _ -> + do { last_stmt' <- checkLastStmt MDoExpr last_stmt + ; rnStmt MDoExpr rnBody last_stmt' thing_inside } + ; return (((stmts1 ++ stmts2), thing), fvs) } where Just (all_but_last, last_stmt) = snocView stmts @@ -707,16 +700,16 @@ rnStmts ctxt rnBody (lstmt@(L loc _) : lstmts) thing_inside ; rnStmt ctxt rnBody lstmt' thing_inside } | otherwise - = do { ((stmts1, (stmts2, thing)), fvs) + = do { ((stmts1, (stmts2, thing)), fvs) <- setSrcSpan loc $ do { checkStmt ctxt lstmt ; rnStmt ctxt rnBody lstmt $ \ bndrs1 -> rnStmts ctxt rnBody lstmts $ \ bndrs2 -> thing_inside (bndrs1 ++ bndrs2) } - ; return (((stmts1 ++ stmts2), thing), fvs) } + ; return (((stmts1 ++ stmts2), thing), fvs) } ---------------------- -rnStmt :: Outputable (body RdrName) => HsStmtContext Name +rnStmt :: Outputable (body RdrName) => HsStmtContext Name -> (Located (body RdrName) -> RnM (Located (body Name), FreeVars)) -> LStmt RdrName (Located (body RdrName)) -> ([Name] -> RnM (thing, FreeVars)) @@ -725,91 +718,91 @@ rnStmt :: Outputable (body RdrName) => HsStmtContext Name -- do not appear in the result FreeVars rnStmt ctxt rnBody (L loc (LastStmt body _)) thing_inside - = do { (body', fv_expr) <- rnBody body - ; (ret_op, fvs1) <- lookupStmtName ctxt returnMName - ; (thing, fvs3) <- thing_inside [] - ; return (([L loc (LastStmt body' ret_op)], thing), - fv_expr `plusFV` fvs1 `plusFV` fvs3) } + = do { (body', fv_expr) <- rnBody body + ; (ret_op, fvs1) <- lookupStmtName ctxt returnMName + ; (thing, fvs3) <- thing_inside [] + ; return (([L loc (LastStmt body' ret_op)], thing), + fv_expr `plusFV` fvs1 `plusFV` fvs3) } rnStmt ctxt rnBody (L loc (BodyStmt body _ _ _)) thing_inside - = do { (body', fv_expr) <- rnBody body - ; (then_op, fvs1) <- lookupStmtName ctxt thenMName - ; (guard_op, fvs2) <- if isListCompExpr ctxt + = do { (body', fv_expr) <- rnBody body + ; (then_op, fvs1) <- lookupStmtName ctxt thenMName + ; (guard_op, fvs2) <- if isListCompExpr ctxt then lookupStmtName ctxt guardMName - else return (noSyntaxExpr, emptyFVs) - -- Only list/parr/monad comprehensions use 'guard' - -- Also for sub-stmts of same eg [ e | x<-xs, gd | blah ] - -- Here "gd" is a guard - ; (thing, fvs3) <- thing_inside [] - ; return (([L loc (BodyStmt body' then_op guard_op placeHolderType)], thing), - fv_expr `plusFV` fvs1 `plusFV` fvs2 `plusFV` fvs3) } + else return (noSyntaxExpr, emptyFVs) + -- Only list/parr/monad comprehensions use 'guard' + -- Also for sub-stmts of same eg [ e | x<-xs, gd | blah ] + -- Here "gd" is a guard + ; (thing, fvs3) <- thing_inside [] + ; return (([L loc (BodyStmt body' then_op guard_op placeHolderType)], thing), + fv_expr `plusFV` fvs1 `plusFV` fvs2 `plusFV` fvs3) } rnStmt ctxt rnBody (L loc (BindStmt pat body _ _)) thing_inside - = do { (body', fv_expr) <- rnBody body - -- The binders do not scope over the expression - ; (bind_op, fvs1) <- lookupStmtName ctxt bindMName - ; (fail_op, fvs2) <- lookupStmtName ctxt failMName - ; rnPat (StmtCtxt ctxt) pat $ \ pat' -> do - { (thing, fvs3) <- thing_inside (collectPatBinders pat') - ; return (([L loc (BindStmt pat' body' bind_op fail_op)], thing), - fv_expr `plusFV` fvs1 `plusFV` fvs2 `plusFV` fvs3) }} 
+ = do { (body', fv_expr) <- rnBody body + -- The binders do not scope over the expression + ; (bind_op, fvs1) <- lookupStmtName ctxt bindMName + ; (fail_op, fvs2) <- lookupStmtName ctxt failMName + ; rnPat (StmtCtxt ctxt) pat $ \ pat' -> do + { (thing, fvs3) <- thing_inside (collectPatBinders pat') + ; return (([L loc (BindStmt pat' body' bind_op fail_op)], thing), + fv_expr `plusFV` fvs1 `plusFV` fvs2 `plusFV` fvs3) }} -- fv_expr shouldn't really be filtered by the rnPatsAndThen - -- but it does not matter because the names are unique + -- but it does not matter because the names are unique -rnStmt _ _ (L loc (LetStmt binds)) thing_inside - = do { rnLocalBindsAndThen binds $ \binds' -> do - { (thing, fvs) <- thing_inside (collectLocalBinders binds') +rnStmt _ _ (L loc (LetStmt binds)) thing_inside + = do { rnLocalBindsAndThen binds $ \binds' -> do + { (thing, fvs) <- thing_inside (collectLocalBinders binds') ; return (([L loc (LetStmt binds')], thing), fvs) } } rnStmt ctxt rnBody (L _ (RecStmt { recS_stmts = rec_stmts })) thing_inside - = do { - -- Step1: Bring all the binders of the mdo into scope - -- (Remember that this also removes the binders from the - -- finally-returned free-vars.) - -- And rename each individual stmt, making a - -- singleton segment. At this stage the FwdRefs field - -- isn't finished: it's empty for all except a BindStmt - -- for which it's the fwd refs within the bind itself - -- (This set may not be empty, because we're in a recursive - -- context.) + = do { + -- Step1: Bring all the binders of the mdo into scope + -- (Remember that this also removes the binders from the + -- finally-returned free-vars.) + -- And rename each individual stmt, making a + -- singleton segment. At this stage the FwdRefs field + -- isn't finished: it's empty for all except a BindStmt + -- for which it's the fwd refs within the bind itself + -- (This set may not be empty, because we're in a recursive + -- context.) ; rnRecStmtsAndThen rnBody rec_stmts $ \ segs -> do - { let bndrs = nameSetToList $ foldr (unionNameSets . (\(ds,_,_,_) -> ds)) + { let bndrs = nameSetToList $ foldr (unionNameSets . (\(ds,_,_,_) -> ds)) emptyNameSet segs ; (thing, fvs_later) <- thing_inside bndrs - ; (return_op, fvs1) <- lookupStmtName ctxt returnMName - ; (mfix_op, fvs2) <- lookupStmtName ctxt mfixName - ; (bind_op, fvs3) <- lookupStmtName ctxt bindMName - ; let - -- Step 2: Fill in the fwd refs. - -- The segments are all singletons, but their fwd-ref - -- field mentions all the things used by the segment - -- that are bound after their use - segs_w_fwd_refs = addFwdRefs segs - - -- Step 3: Group together the segments to make bigger segments - -- Invariant: in the result, no segment uses a variable - -- bound in a later segment - grouped_segs = glomSegments ctxt segs_w_fwd_refs - - -- Step 4: Turn the segments into Stmts - -- Use RecStmt when and only when there are fwd refs - -- Also gather up the uses from the end towards the - -- start, so we can tell the RecStmt which things are - -- used 'after' the RecStmt - empty_rec_stmt = emptyRecStmt { recS_ret_fn = return_op + ; (return_op, fvs1) <- lookupStmtName ctxt returnMName + ; (mfix_op, fvs2) <- lookupStmtName ctxt mfixName + ; (bind_op, fvs3) <- lookupStmtName ctxt bindMName + ; let + -- Step 2: Fill in the fwd refs. 
+ -- The segments are all singletons, but their fwd-ref + -- field mentions all the things used by the segment + -- that are bound after their use + segs_w_fwd_refs = addFwdRefs segs + + -- Step 3: Group together the segments to make bigger segments + -- Invariant: in the result, no segment uses a variable + -- bound in a later segment + grouped_segs = glomSegments ctxt segs_w_fwd_refs + + -- Step 4: Turn the segments into Stmts + -- Use RecStmt when and only when there are fwd refs + -- Also gather up the uses from the end towards the + -- start, so we can tell the RecStmt which things are + -- used 'after' the RecStmt + empty_rec_stmt = emptyRecStmt { recS_ret_fn = return_op , recS_mfix_fn = mfix_op , recS_bind_fn = bind_op } - (rec_stmts', fvs) = segsToStmts empty_rec_stmt grouped_segs fvs_later + (rec_stmts', fvs) = segsToStmts empty_rec_stmt grouped_segs fvs_later - ; return ((rec_stmts', thing), fvs `plusFV` fvs1 `plusFV` fvs2 `plusFV` fvs3) } } + ; return ((rec_stmts', thing), fvs `plusFV` fvs1 `plusFV` fvs2 `plusFV` fvs3) } } rnStmt ctxt _ (L loc (ParStmt segs _ _)) thing_inside - = do { (mzip_op, fvs1) <- lookupStmtName ctxt mzipName + = do { (mzip_op, fvs1) <- lookupStmtName ctxt mzipName ; (bind_op, fvs2) <- lookupStmtName ctxt bindMName ; (return_op, fvs3) <- lookupStmtName ctxt returnMName - ; ((segs', thing), fvs4) <- rnParallelStmts (ParStmtCtxt ctxt) return_op segs thing_inside - ; return ( ([L loc (ParStmt segs' mzip_op bind_op)], thing) + ; ((segs', thing), fvs4) <- rnParallelStmts (ParStmtCtxt ctxt) return_op segs thing_inside + ; return ( ([L loc (ParStmt segs' mzip_op bind_op)], thing) , fvs1 `plusFV` fvs2 `plusFV` fvs3 `plusFV` fvs4) } rnStmt ctxt _ (L loc (TransStmt { trS_stmts = stmts, trS_by = by, trS_form = form @@ -819,7 +812,7 @@ rnStmt ctxt _ (L loc (TransStmt { trS_stmts = stmts, trS_by = by, trS_form = for -- Rename the stmts and the 'by' expression -- Keep track of the variables mentioned in the 'by' expression - ; ((stmts', (by', used_bndrs, thing)), fvs2) + ; ((stmts', (by', used_bndrs, thing)), fvs2) <- rnStmts (TransStmtCtxt ctxt) rnLExpr stmts $ \ bndrs -> do { (by', fvs_by) <- mapMaybeFvRn rnLExpr by ; (thing, fvs_thing) <- thing_inside bndrs @@ -836,10 +829,10 @@ rnStmt ctxt _ (L loc (TransStmt { trS_stmts = stmts, trS_by = by, trS_form = for ThenForm -> return (noSyntaxExpr, emptyFVs) _ -> lookupStmtName ctxt fmapName - ; let all_fvs = fvs1 `plusFV` fvs2 `plusFV` fvs3 + ; let all_fvs = fvs1 `plusFV` fvs2 `plusFV` fvs3 `plusFV` fvs4 `plusFV` fvs5 bndr_map = used_bndrs `zip` used_bndrs - -- See Note [TransStmt binder map] in HsExpr + -- See Note [TransStmt binder map] in HsExpr ; traceRn (text "rnStmt: implicitly rebound these used binders:" <+> ppr bndr_map) ; return (([L loc (TransStmt { trS_stmts = stmts', trS_bndrs = bndr_map @@ -847,7 +840,7 @@ rnStmt ctxt _ (L loc (TransStmt { trS_stmts = stmts, trS_by = by, trS_form = for , trS_ret = return_op, trS_bind = bind_op , trS_fmap = fmap_op })], thing), all_fvs) } -rnParallelStmts :: forall thing. HsStmtContext Name +rnParallelStmts :: forall thing. 
HsStmtContext Name -> SyntaxExpr Name -> [ParStmtBlock RdrName RdrName] -> ([Name] -> RnM (thing, FreeVars)) @@ -860,20 +853,20 @@ rnParallelStmts ctxt return_op segs thing_inside rn_segs :: LocalRdrEnv -> [Name] -> [ParStmtBlock RdrName RdrName] -> RnM (([ParStmtBlock Name Name], thing), FreeVars) - rn_segs _ bndrs_so_far [] + rn_segs _ bndrs_so_far [] = do { let (bndrs', dups) = removeDups cmpByOcc bndrs_so_far ; mapM_ dupErr dups ; (thing, fvs) <- bindLocalNames bndrs' (thing_inside bndrs') ; return (([], thing), fvs) } - rn_segs env bndrs_so_far (ParStmtBlock stmts _ _ : segs) + rn_segs env bndrs_so_far (ParStmtBlock stmts _ _ : segs) = do { ((stmts', (used_bndrs, segs', thing)), fvs) <- rnStmts ctxt rnLExpr stmts $ \ bndrs -> setLocalRdrEnv env $ do { ((segs', thing), fvs) <- rn_segs env (bndrs ++ bndrs_so_far) segs - ; let used_bndrs = filter (`elemNameSet` fvs) bndrs + ; let used_bndrs = filter (`elemNameSet` fvs) bndrs ; return ((used_bndrs, segs', thing), fvs) } - + ; let seg' = ParStmtBlock stmts' used_bndrs return_op ; return ((seg':segs', thing), fvs) } @@ -884,7 +877,7 @@ rnParallelStmts ctxt return_op segs thing_inside lookupStmtName :: HsStmtContext Name -> Name -> RnM (HsExpr Name, FreeVars) -- Like lookupSyntaxName, but ListComp/PArrComp are never rebindable -- Neither is ArrowExpr, which has its own desugarer in DsArrows -lookupStmtName ctxt n +lookupStmtName ctxt n = case ctxt of ListComp -> not_rebindable PArrComp -> not_rebindable @@ -896,8 +889,8 @@ lookupStmtName ctxt n MonadComp -> rebindable GhciStmtCtxt -> rebindable -- I suppose? - ParStmtCtxt c -> lookupStmtName c n -- Look inside to - TransStmtCtxt c -> lookupStmtName c n -- the parent context + ParStmtCtxt c -> lookupStmtName c n -- Look inside to + TransStmtCtxt c -> lookupStmtName c n -- the parent context where rebindable = lookupSyntaxName n not_rebindable = return (HsVar n, emptyFVs) @@ -905,36 +898,36 @@ lookupStmtName ctxt n Note [Renaming parallel Stmts] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Renaming parallel statements is painful. Given, say +Renaming parallel statements is painful. Given, say [ a+c | a <- as, bs <- bss | c <- bs, a <- ds ] Note that (a) In order to report "Defined by not used" about 'bs', we must rename each group of Stmts with a thing_inside whose FreeVars include at least {a,c} - + (b) We want to report that 'a' is illegally bound in both branches - (c) The 'bs' in the second group must obviously not be captured by + (c) The 'bs' in the second group must obviously not be captured by the binding in the first group -To satisfy (a) we nest the segements. +To satisfy (a) we nest the segements. To satisfy (b) we check for duplicates just before thing_inside. To satisfy (c) we reset the LocalRdrEnv each time. 
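To make point (c) concrete, here is a tiny example (not part of this patch; the module and names are made up for illustration) using ParallelListComp. Binders of one branch are visible in the result expression but not in sibling branches, which is exactly why the renamer resets the LocalRdrEnv for each segment:

\begin{code}
{-# LANGUAGE ParallelListComp #-}
module ParCompSketch where      -- hypothetical module, for illustration only

-- Each branch is renamed in its own scope; only the result expression
-- (x, y) sees the binders of both branches.
pairs :: [Int] -> [Int] -> [(Int, Int)]
pairs xs ys = [ (x, y) | x <- xs | y <- ys ]

-- By contrast,  [ (x, y) | x <- xs | y <- x : ys ]  is rejected:
-- 'x' is bound in the first branch and is not in scope in the second,
-- matching requirement (c) above.
\end{code}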
%************************************************************************ -%* * +%* * \subsubsection{mdo expressions} -%* * +%* * %************************************************************************ \begin{code} type FwdRefs = NameSet type Segment stmts = (Defs, - Uses, -- May include defs - FwdRefs, -- A subset of uses that are - -- (a) used before they are bound in this segment, or - -- (b) used here, and bound in subsequent segments - stmts) -- Either Stmt or [Stmt] + Uses, -- May include defs + FwdRefs, -- A subset of uses that are + -- (a) used before they are bound in this segment, or + -- (b) used here, and bound in subsequent segments + stmts) -- Either Stmt or [Stmt] -- wrapper that does both the left- and right-hand sides @@ -946,35 +939,35 @@ rnRecStmtsAndThen :: Outputable (body RdrName) => -> ([Segment (LStmt Name (Located (body Name)))] -> RnM (a, FreeVars)) -> RnM (a, FreeVars) rnRecStmtsAndThen rnBody s cont - = do { -- (A) Make the mini fixity env for all of the stmts - fix_env <- makeMiniFixityEnv (collectRecStmtsFixities s) + = do { -- (A) Make the mini fixity env for all of the stmts + fix_env <- makeMiniFixityEnv (collectRecStmtsFixities s) - -- (B) Do the LHSes - ; new_lhs_and_fv <- rn_rec_stmts_lhs fix_env s + -- (B) Do the LHSes + ; new_lhs_and_fv <- rn_rec_stmts_lhs fix_env s - -- ...bring them and their fixities into scope - ; let bound_names = collectLStmtsBinders (map fst new_lhs_and_fv) - -- Fake uses of variables introduced implicitly (warning suppression, see #4404) - implicit_uses = lStmtsImplicits (map fst new_lhs_and_fv) - ; bindLocalNamesFV bound_names $ + -- ...bring them and their fixities into scope + ; let bound_names = collectLStmtsBinders (map fst new_lhs_and_fv) + -- Fake uses of variables introduced implicitly (warning suppression, see #4404) + implicit_uses = lStmtsImplicits (map fst new_lhs_and_fv) + ; bindLocalNamesFV bound_names $ addLocalFixities fix_env bound_names $ do - -- (C) do the right-hand-sides and thing-inside - { segs <- rn_rec_stmts rnBody bound_names new_lhs_and_fv - ; (res, fvs) <- cont segs - ; warnUnusedLocalBinds bound_names (fvs `unionNameSets` implicit_uses) - ; return (res, fvs) }} + -- (C) do the right-hand-sides and thing-inside + { segs <- rn_rec_stmts rnBody bound_names new_lhs_and_fv + ; (res, fvs) <- cont segs + ; warnUnusedLocalBinds bound_names (fvs `unionNameSets` implicit_uses) + ; return (res, fvs) }} -- get all the fixity decls in any Let stmt collectRecStmtsFixities :: [LStmtLR RdrName RdrName body] -> [LFixitySig RdrName] -collectRecStmtsFixities l = - foldr (\ s -> \acc -> case s of - (L _ (LetStmt (HsValBinds (ValBindsIn _ sigs)))) -> - foldr (\ sig -> \ acc -> case sig of +collectRecStmtsFixities l = + foldr (\ s -> \acc -> case s of + (L _ (LetStmt (HsValBinds (ValBindsIn _ sigs)))) -> + foldr (\ sig -> \ acc -> case sig of (L loc (FixSig s)) -> (L loc s) : acc _ -> acc) acc sigs _ -> acc) [] l - + -- left-hand sides rn_rec_stmt_lhs :: Outputable body => MiniFixityEnv @@ -984,23 +977,23 @@ rn_rec_stmt_lhs :: Outputable body => MiniFixityEnv -- so we don't bother to compute it accurately in the other cases -> RnM [(LStmtLR Name RdrName body, FreeVars)] -rn_rec_stmt_lhs _ (L loc (BodyStmt body a b c)) +rn_rec_stmt_lhs _ (L loc (BodyStmt body a b c)) = return [(L loc (BodyStmt body a b c), emptyFVs)] -rn_rec_stmt_lhs _ (L loc (LastStmt body a)) +rn_rec_stmt_lhs _ (L loc (LastStmt body a)) = return [(L loc (LastStmt body a), emptyFVs)] -rn_rec_stmt_lhs fix_env (L loc (BindStmt pat body a b)) - = do 
+rn_rec_stmt_lhs fix_env (L loc (BindStmt pat body a b)) + = do -- should the ctxt be MDo instead? - (pat', fv_pat) <- rnBindPat (localRecNameMaker fix_env) pat + (pat', fv_pat) <- rnBindPat (localRecNameMaker fix_env) pat return [(L loc (BindStmt pat' body a b), fv_pat)] rn_rec_stmt_lhs _ (L _ (LetStmt binds@(HsIPBinds _))) = failWith (badIpBinds (ptext (sLit "an mdo expression")) binds) -rn_rec_stmt_lhs fix_env (L loc (LetStmt (HsValBinds binds))) +rn_rec_stmt_lhs fix_env (L loc (LetStmt (HsValBinds binds))) = do (_bound_names, binds') <- rnLocalValBindsLHS fix_env binds return [(L loc (LetStmt (HsValBinds binds')), -- Warning: this is bogus; see function invariant @@ -1008,20 +1001,20 @@ rn_rec_stmt_lhs fix_env (L loc (LetStmt (HsValBinds binds))) )] -- XXX Do we need to do something with the return and mfix names? -rn_rec_stmt_lhs fix_env (L _ (RecStmt { recS_stmts = stmts })) -- Flatten Rec inside Rec +rn_rec_stmt_lhs fix_env (L _ (RecStmt { recS_stmts = stmts })) -- Flatten Rec inside Rec = rn_rec_stmts_lhs fix_env stmts -rn_rec_stmt_lhs _ stmt@(L _ (ParStmt {})) -- Syntactically illegal in mdo +rn_rec_stmt_lhs _ stmt@(L _ (ParStmt {})) -- Syntactically illegal in mdo = pprPanic "rn_rec_stmt" (ppr stmt) - -rn_rec_stmt_lhs _ stmt@(L _ (TransStmt {})) -- Syntactically illegal in mdo + +rn_rec_stmt_lhs _ stmt@(L _ (TransStmt {})) -- Syntactically illegal in mdo = pprPanic "rn_rec_stmt" (ppr stmt) rn_rec_stmt_lhs _ (L _ (LetStmt EmptyLocalBinds)) = panic "rn_rec_stmt LetStmt EmptyLocalBinds" rn_rec_stmts_lhs :: Outputable body => MiniFixityEnv - -> [LStmt RdrName body] + -> [LStmt RdrName body] -> RnM [(LStmtLR Name RdrName body, FreeVars)] rn_rec_stmts_lhs fix_env stmts = do { ls <- concatMapM (rn_rec_stmt_lhs fix_env) stmts @@ -1039,41 +1032,41 @@ rn_rec_stmt :: (Outputable (body RdrName)) => (Located (body RdrName) -> RnM (Located (body Name), FreeVars)) -> [Name] -> LStmtLR Name RdrName (Located (body RdrName)) -> FreeVars -> RnM [Segment (LStmt Name (Located (body Name)))] - -- Rename a Stmt that is inside a RecStmt (or mdo) - -- Assumes all binders are already in scope - -- Turns each stmt into a singleton Stmt + -- Rename a Stmt that is inside a RecStmt (or mdo) + -- Assumes all binders are already in scope + -- Turns each stmt into a singleton Stmt rn_rec_stmt rnBody _ (L loc (LastStmt body _)) _ - = do { (body', fv_expr) <- rnBody body - ; (ret_op, fvs1) <- lookupSyntaxName returnMName - ; return [(emptyNameSet, fv_expr `plusFV` fvs1, emptyNameSet, + = do { (body', fv_expr) <- rnBody body + ; (ret_op, fvs1) <- lookupSyntaxName returnMName + ; return [(emptyNameSet, fv_expr `plusFV` fvs1, emptyNameSet, L loc (LastStmt body' ret_op))] } rn_rec_stmt rnBody _ (L loc (BodyStmt body _ _ _)) _ = rnBody body `thenM` \ (body', fvs) -> - lookupSyntaxName thenMName `thenM` \ (then_op, fvs1) -> + lookupSyntaxName thenMName `thenM` \ (then_op, fvs1) -> return [(emptyNameSet, fvs `plusFV` fvs1, emptyNameSet, - L loc (BodyStmt body' then_op noSyntaxExpr placeHolderType))] + L loc (BodyStmt body' then_op noSyntaxExpr placeHolderType))] rn_rec_stmt rnBody _ (L loc (BindStmt pat' body _ _)) fv_pat - = rnBody body `thenM` \ (body', fv_expr) -> - lookupSyntaxName bindMName `thenM` \ (bind_op, fvs1) -> - lookupSyntaxName failMName `thenM` \ (fail_op, fvs2) -> + = rnBody body `thenM` \ (body', fv_expr) -> + lookupSyntaxName bindMName `thenM` \ (bind_op, fvs1) -> + lookupSyntaxName failMName `thenM` \ (fail_op, fvs2) -> let - bndrs = mkNameSet (collectPatBinders pat') - fvs = fv_expr `plusFV` fv_pat 
`plusFV` fvs1 `plusFV` fvs2 + bndrs = mkNameSet (collectPatBinders pat') + fvs = fv_expr `plusFV` fv_pat `plusFV` fvs1 `plusFV` fvs2 in return [(bndrs, fvs, bndrs `intersectNameSet` fvs, - L loc (BindStmt pat' body' bind_op fail_op))] + L loc (BindStmt pat' body' bind_op fail_op))] rn_rec_stmt _ _ (L _ (LetStmt binds@(HsIPBinds _))) _ = failWith (badIpBinds (ptext (sLit "an mdo expression")) binds) -rn_rec_stmt _ all_bndrs (L loc (LetStmt (HsValBinds binds'))) _ = do - (binds', du_binds) <- +rn_rec_stmt _ all_bndrs (L loc (LetStmt (HsValBinds binds'))) _ = do + (binds', du_binds) <- -- fixities and unused are handled above in rnRecStmtsAndThen rnLocalValBindsRHS (mkNameSet all_bndrs) binds' - return [(duDefs du_binds, allUses du_binds, - emptyNameSet, L loc (LetStmt (HsValBinds binds')))] + return [(duDefs du_binds, allUses du_binds, + emptyNameSet, L loc (LetStmt (HsValBinds binds')))] -- no RecStmt case because they get flattened above when doing the LHSes rn_rec_stmt _ _ stmt@(L _ (RecStmt {})) _ @@ -1090,46 +1083,46 @@ rn_rec_stmt _ _ (L _ (LetStmt EmptyLocalBinds)) _ rn_rec_stmts :: Outputable (body RdrName) => (Located (body RdrName) -> RnM (Located (body Name), FreeVars)) - -> [Name] - -> [(LStmtLR Name RdrName (Located (body RdrName)), FreeVars)] + -> [Name] + -> [(LStmtLR Name RdrName (Located (body RdrName)), FreeVars)] -> RnM [Segment (LStmt Name (Located (body Name)))] -rn_rec_stmts rnBody bndrs stmts = +rn_rec_stmts rnBody bndrs stmts = mapM (uncurry (rn_rec_stmt rnBody bndrs)) stmts `thenM` \ segs_s -> return (concat segs_s) --------------------------------------------- addFwdRefs :: [Segment a] -> [Segment a] -- So far the segments only have forward refs *within* the Stmt --- (which happens for bind: x <- ...x...) +-- (which happens for bind: x <- ...x...) -- This function adds the cross-seg fwd ref info -addFwdRefs pairs +addFwdRefs pairs = fst (foldr mk_seg ([], emptyNameSet) pairs) where mk_seg (defs, uses, fwds, stmts) (segs, later_defs) - = (new_seg : segs, all_defs) - where - new_seg = (defs, uses, new_fwds, stmts) - all_defs = later_defs `unionNameSets` defs - new_fwds = fwds `unionNameSets` (uses `intersectNameSet` later_defs) - -- Add the downstream fwd refs here + = (new_seg : segs, all_defs) + where + new_seg = (defs, uses, new_fwds, stmts) + all_defs = later_defs `unionNameSets` defs + new_fwds = fwds `unionNameSets` (uses `intersectNameSet` later_defs) + -- Add the downstream fwd refs here ---------------------------------------------------- --- Glomming the singleton segments of an mdo into --- minimal recursive groups. +-- Glomming the singleton segments of an mdo into +-- minimal recursive groups. -- -- At first I thought this was just strongly connected components, but -- there's an important constraint: the order of the stmts must not change. -- -- Consider --- mdo { x <- ...y... --- p <- z --- y <- ...x... --- q <- x --- z <- y --- r <- x } +-- mdo { x <- ...y... +-- p <- z +-- y <- ...x... +-- q <- x +-- z <- y +-- r <- x } -- --- Here, the first stmt mention 'y', which is bound in the third. +-- Here, the first stmt mention 'y', which is bound in the third. -- But that means that the innocent second stmt (p <- z) gets caught -- up in the recursion. And that in turn means that the binding for -- 'z' has to be included... and so on. @@ -1138,16 +1131,16 @@ addFwdRefs pairs -- Now add the next one { z <- y ; r <- x } -- Now add one more { q <- x ; z <- y ; r <- x } -- Now one more... but this time we have to group a bunch into rec --- { rec { y <- ...x... 
; q <- x ; z <- y } ; r <- x } +-- { rec { y <- ...x... ; q <- x ; z <- y } ; r <- x } -- Now one more, which we can add on without a rec --- { p <- z ; --- rec { y <- ...x... ; q <- x ; z <- y } ; --- r <- x } +-- { p <- z ; +-- rec { y <- ...x... ; q <- x ; z <- y } ; +-- r <- x } -- Finally we add the last one; since it mentions y we have to -- glom it togeher with the first two groups --- { rec { x <- ...y...; p <- z ; y <- ...x... ; --- q <- x ; z <- y } ; --- r <- x } +-- { rec { x <- ...y...; p <- z ; y <- ...x... ; +-- q <- x ; z <- y } ; +-- r <- x } -- -- NB. June 7 2012: We only glom segments that appear in -- an explicit mdo; and leave those found in "do rec"'s intact. @@ -1158,30 +1151,30 @@ glomSegments :: HsStmtContext Name -> [Segment (LStmt Name body)] -> [Segment [L glomSegments _ [] = [] glomSegments ctxt ((defs,uses,fwds,stmt) : segs) - -- Actually stmts will always be a singleton + -- Actually stmts will always be a singleton = (seg_defs, seg_uses, seg_fwds, seg_stmts) : others where - segs' = glomSegments ctxt segs + segs' = glomSegments ctxt segs (extras, others) = grab uses segs' (ds, us, fs, ss) = unzip4 extras - + seg_defs = plusFVs ds `plusFV` defs seg_uses = plusFVs us `plusFV` uses seg_fwds = plusFVs fs `plusFV` fwds seg_stmts = stmt : concat ss - grab :: NameSet -- The client - -> [Segment a] - -> ([Segment a], -- Needed by the 'client' - [Segment a]) -- Not needed by the client - -- The result is simply a split of the input - grab uses dus - = (reverse yeses, reverse noes) - where - (noes, yeses) = span not_needed (reverse dus) - not_needed (defs,_,_,_) = case ctxt of - MDoExpr -> not (intersectsNameSet defs uses) - _ -> False -- unless we're in mdo, we *need* everything + grab :: NameSet -- The client + -> [Segment a] + -> ([Segment a], -- Needed by the 'client' + [Segment a]) -- Not needed by the client + -- The result is simply a split of the input + grab uses dus + = (reverse yeses, reverse noes) + where + (noes, yeses) = span not_needed (reverse dus) + not_needed (defs,_,_,_) = case ctxt of + MDoExpr -> not (intersectsNameSet defs uses) + _ -> False -- unless we're in mdo, we *need* everything ---------------------------------------------------- @@ -1196,20 +1189,20 @@ segsToStmts empty_rec_stmt ((defs, uses, fwds, ss) : segs) fvs_later (new_stmt : later_stmts, later_uses `plusFV` uses) where (later_stmts, later_uses) = segsToStmts empty_rec_stmt segs fvs_later - new_stmt | non_rec = head ss - | otherwise = L (getLoc (head ss)) rec_stmt + new_stmt | non_rec = head ss + | otherwise = L (getLoc (head ss)) rec_stmt rec_stmt = empty_rec_stmt { recS_stmts = ss , recS_later_ids = nameSetToList used_later , recS_rec_ids = nameSetToList fwds } non_rec = isSingleton ss && isEmptyNameSet fwds used_later = defs `intersectNameSet` later_uses - -- The ones needed after the RecStmt + -- The ones needed after the RecStmt \end{code} %************************************************************************ -%* * +%* * \subsubsection{Assertion utils} -%* * +%* * %************************************************************************ \begin{code} @@ -1230,22 +1223,22 @@ Note [Adding the implicit parameter to 'assert'] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The renamer transforms (assert e1 e2) to (assert "Foo.hs:27" e1 e2). By doing this in the renamer we allow the typechecker to just see the -expanded application and do the right thing. But it's not really +expanded application and do the right thing. 
But it's not really the Right Thing because there's no way to "undo" if you want to see the original source code. We'll have fix this in due course, when -we care more about being able to reconstruct the exact original +we care more about being able to reconstruct the exact original program. %************************************************************************ -%* * +%* * \subsubsection{Errors} -%* * +%* * %************************************************************************ \begin{code} checkEmptyStmts :: HsStmtContext Name -> RnM () -- We've seen an empty sequence of Stmts... is that ok? -checkEmptyStmts ctxt +checkEmptyStmts ctxt = unless (okEmpty ctxt) (addErr (emptyErr ctxt)) okEmpty :: HsStmtContext a -> Bool @@ -1257,35 +1250,35 @@ emptyErr (ParStmtCtxt {}) = ptext (sLit "Empty statement group in parallel com emptyErr (TransStmtCtxt {}) = ptext (sLit "Empty statement group preceding 'group' or 'then'") emptyErr ctxt = ptext (sLit "Empty") <+> pprStmtContext ctxt ----------------------- +---------------------- checkLastStmt :: Outputable (body RdrName) => HsStmtContext Name -> LStmt RdrName (Located (body RdrName)) -> RnM (LStmt RdrName (Located (body RdrName))) checkLastStmt ctxt lstmt@(L loc stmt) - = case ctxt of + = case ctxt of ListComp -> check_comp MonadComp -> check_comp PArrComp -> check_comp - ArrowExpr -> check_do - DoExpr -> check_do + ArrowExpr -> check_do + DoExpr -> check_do MDoExpr -> check_do _ -> check_other where - check_do -- Expect BodyStmt, and change it to LastStmt - = case stmt of + check_do -- Expect BodyStmt, and change it to LastStmt + = case stmt of BodyStmt e _ _ _ -> return (L loc (mkLastStmt e)) LastStmt {} -> return lstmt -- "Deriving" clauses may generate a - -- LastStmt directly (unlike the parser) - _ -> do { addErr (hang last_error 2 (ppr stmt)); return lstmt } + -- LastStmt directly (unlike the parser) + _ -> do { addErr (hang last_error 2 (ppr stmt)); return lstmt } last_error = (ptext (sLit "The last statement in") <+> pprAStmtContext ctxt <+> ptext (sLit "must be an expression")) - check_comp -- Expect LastStmt; this should be enforced by the parser! - = case stmt of + check_comp -- Expect LastStmt; this should be enforced by the parser! 
+ = case stmt of LastStmt {} -> return lstmt _ -> pprPanic "checkLastStmt" (ppr lstmt) - check_other -- Behave just as if this wasn't the last stmt + check_other -- Behave just as if this wasn't the last stmt = do { checkStmt ctxt lstmt; return lstmt } -- Checking when a particular Stmt is ok @@ -1294,7 +1287,7 @@ checkStmt :: HsStmtContext Name -> RnM () checkStmt ctxt (L _ stmt) = do { dflags <- getDynFlags - ; case okStmt dflags ctxt stmt of + ; case okStmt dflags ctxt stmt of Nothing -> return () Just extra -> addErr (msg $$ extra) } where @@ -1321,17 +1314,17 @@ okStmt, okDoStmt, okCompStmt, okParStmt, okPArrStmt -- Return Nothing if OK, (Just extra) if not ok -- The "extra" is an SDoc that is appended to an generic error message -okStmt dflags ctxt stmt +okStmt dflags ctxt stmt = case ctxt of - PatGuard {} -> okPatGuardStmt stmt - ParStmtCtxt ctxt -> okParStmt dflags ctxt stmt - DoExpr -> okDoStmt dflags ctxt stmt - MDoExpr -> okDoStmt dflags ctxt stmt - ArrowExpr -> okDoStmt dflags ctxt stmt + PatGuard {} -> okPatGuardStmt stmt + ParStmtCtxt ctxt -> okParStmt dflags ctxt stmt + DoExpr -> okDoStmt dflags ctxt stmt + MDoExpr -> okDoStmt dflags ctxt stmt + ArrowExpr -> okDoStmt dflags ctxt stmt GhciStmtCtxt -> okDoStmt dflags ctxt stmt - ListComp -> okCompStmt dflags ctxt stmt - MonadComp -> okCompStmt dflags ctxt stmt - PArrComp -> okPArrStmt dflags ctxt stmt + ListComp -> okCompStmt dflags ctxt stmt + MonadComp -> okCompStmt dflags ctxt stmt + PArrComp -> okPArrStmt dflags ctxt stmt TransStmtCtxt ctxt -> okStmt dflags ctxt stmt ------------- @@ -1354,7 +1347,7 @@ okDoStmt dflags ctxt stmt = case stmt of RecStmt {} | Opt_RecursiveDo `xopt` dflags -> isOK - | ArrowExpr <- ctxt -> isOK -- Arrows allows 'rec' + | ArrowExpr <- ctxt -> isOK -- Arrows allows 'rec' | otherwise -> Just (ptext (sLit "Use -XRecursiveDo")) BindStmt {} -> isOK LetStmt {} -> isOK @@ -1367,10 +1360,10 @@ okCompStmt dflags _ stmt BindStmt {} -> isOK LetStmt {} -> isOK BodyStmt {} -> isOK - ParStmt {} + ParStmt {} | Opt_ParallelListComp `xopt` dflags -> isOK | otherwise -> Just (ptext (sLit "Use -XParallelListComp")) - TransStmt {} + TransStmt {} | Opt_TransformListComp `xopt` dflags -> isOK | otherwise -> Just (ptext (sLit "Use -XTransformListComp")) RecStmt {} -> notOK @@ -1382,7 +1375,7 @@ okPArrStmt dflags _ stmt BindStmt {} -> isOK LetStmt {} -> isOK BodyStmt {} -> isOK - ParStmt {} + ParStmt {} | Opt_ParallelListComp `xopt` dflags -> isOK | otherwise -> Just (ptext (sLit "Use -XParallelListComp")) TransStmt {} -> notOK @@ -1392,8 +1385,8 @@ okPArrStmt dflags _ stmt --------- checkTupleSection :: [HsTupArg RdrName] -> RnM () checkTupleSection args - = do { tuple_section <- xoptM Opt_TupleSections - ; checkErr (all tupArgPresent args || tuple_section) msg } + = do { tuple_section <- xoptM Opt_TupleSections + ; checkErr (all tupArgPresent args || tuple_section) msg } where msg = ptext (sLit "Illegal tuple section: use -XTupleSections") @@ -1405,11 +1398,11 @@ sectionErr expr patSynErr :: HsExpr RdrName -> RnM (HsExpr Name, FreeVars) patSynErr e = do { addErr (sep [ptext (sLit "Pattern syntax in expression context:"), - nest 4 (ppr e)]) - ; return (EWildPat, emptyFVs) } + nest 4 (ppr e)]) + ; return (EWildPat, emptyFVs) } badIpBinds :: Outputable a => SDoc -> a -> SDoc badIpBinds what binds = hang (ptext (sLit "Implicit-parameter bindings illegal in") <+> what) - 2 (ppr binds) + 2 (ppr binds) \end{code} diff --git a/compiler/rename/RnNames.lhs b/compiler/rename/RnNames.lhs index 83aad51d34f7..4e5672b808bc 100644 
--- a/compiler/rename/RnNames.lhs +++ b/compiler/rename/RnNames.lhs @@ -1318,12 +1318,9 @@ warnUnusedImportDecls gbl_env ; whenGOptM Opt_D_dump_minimal_imports $ printMinimalImports usage } where - explicit_import (L _ decl) = unLoc (ideclName decl) /= pRELUDE_NAME + explicit_import (L _ decl) = not (ideclImplicit decl) -- Filter out the implicit Prelude import -- which we do not want to bleat about - -- This also filters out an *explicit* Prelude import - -- but solving that problem involves more plumbing, and - -- it just doesn't seem worth it \end{code} diff --git a/compiler/rename/RnTypes.lhs b/compiler/rename/RnTypes.lhs index a230701ec297..fb55ac932ce5 100644 --- a/compiler/rename/RnTypes.lhs +++ b/compiler/rename/RnTypes.lhs @@ -4,26 +4,19 @@ \section[RnSource]{Main pass of renamer} \begin{code} -{-# OPTIONS -fno-warn-tabs #-} --- The above warning supression flag is a temporary kludge. --- While working on this module you are encouraged to remove it and --- detab the module (please do the detabbing in a separate patch). See --- http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces --- for details - -module RnTypes ( - -- Type related stuff - rnHsType, rnLHsType, rnLHsTypes, rnContext, +module RnTypes ( + -- Type related stuff + rnHsType, rnLHsType, rnLHsTypes, rnContext, rnHsKind, rnLHsKind, rnLHsMaybeKind, - rnHsSigType, rnLHsInstType, rnConDeclFields, + rnHsSigType, rnLHsInstType, rnConDeclFields, newTyVarNameRn, - -- Precence related stuff - mkOpAppRn, mkNegAppRn, mkOpFormRn, mkConOpPatRn, - checkPrecMatch, checkSectionPrec, warnUnusedForAlls, + -- Precence related stuff + mkOpAppRn, mkNegAppRn, mkOpFormRn, mkConOpPatRn, + checkPrecMatch, checkSectionPrec, warnUnusedForAlls, - -- Splice related stuff - rnSplice, checkTH, + -- Splice related stuff + rnSplice, checkTH, -- Binding related stuff bindSigTyVarsFV, bindHsTyVars, rnHsBndrSig, @@ -34,7 +27,7 @@ module RnTypes ( import {-# SOURCE #-} RnExpr( rnLExpr ) #ifdef GHCI import {-# SOURCE #-} TcSplice( runQuasiQuoteType ) -#endif /* GHCI */ +#endif /* GHCI */ import DynFlags import HsSyn @@ -49,13 +42,13 @@ import SrcLoc import NameSet import Util -import BasicTypes ( compareFixity, funTyFixity, negateFixity, - Fixity(..), FixityDirection(..) ) +import BasicTypes ( compareFixity, funTyFixity, negateFixity, + Fixity(..), FixityDirection(..) ) import Outputable import FastString import Maybes import Data.List ( nub ) -import Control.Monad ( unless, when ) +import Control.Monad ( unless, when ) #include "HsVersions.h" \end{code} @@ -64,20 +57,20 @@ These type renamers are in a separate module, rather than in (say) RnSource, to break several loop. %********************************************************* -%* * +%* * \subsection{Renaming types} -%* * +%* * %********************************************************* \begin{code} rnHsSigType :: SDoc -> LHsType RdrName -> RnM (LHsType Name, FreeVars) - -- rnHsSigType is used for source-language type signatures, - -- which use *implicit* universal quantification. + -- rnHsSigType is used for source-language type signatures, + -- which use *implicit* universal quantification. 
rnHsSigType doc_str ty = rnLHsType (TypeSigCtx doc_str) ty rnLHsInstType :: SDoc -> LHsType RdrName -> RnM (LHsType Name, FreeVars) -- Rename the type in an instance or standalone deriving decl -rnLHsInstType doc_str ty +rnLHsInstType doc_str ty = do { (ty', fvs) <- rnLHsType (GenericCtx doc_str) ty ; unless good_inst_ty (addErrAt (getLoc ty) (badInstTy ty)) ; return (ty', fvs) } @@ -88,7 +81,7 @@ rnLHsInstType doc_str ty | otherwise = False badInstTy :: LHsType RdrName -> SDoc -badInstTy ty = ptext (sLit "Malformed instance:") <+> ppr ty +badInstTy ty = ptext (sLit "Malformed instance:") <+> ppr ty \end{code} rnHsType is here because we call it from loadInstDecl, and I didn't @@ -98,7 +91,7 @@ want a gratuitous knot. rnLHsTyKi :: Bool -- True <=> renaming a type, False <=> a kind -> HsDocContext -> LHsType RdrName -> RnM (LHsType Name, FreeVars) rnLHsTyKi isType doc (L loc ty) - = setSrcSpan loc $ + = setSrcSpan loc $ do { (ty', fvs) <- rnHsTyKi isType doc ty ; return (L loc ty', fvs) } @@ -110,9 +103,9 @@ rnLHsKind = rnLHsTyKi False rnLHsMaybeKind :: HsDocContext -> Maybe (LHsKind RdrName) -> RnM (Maybe (LHsKind Name), FreeVars) -rnLHsMaybeKind _ Nothing +rnLHsMaybeKind _ Nothing = return (Nothing, emptyFVs) -rnLHsMaybeKind doc (Just kind) +rnLHsMaybeKind doc (Just kind) = do { (kind', fvs) <- rnLHsKind doc kind ; return (Just kind', fvs) } @@ -123,15 +116,15 @@ rnHsKind = rnHsTyKi False rnHsTyKi :: Bool -> HsDocContext -> HsType RdrName -> RnM (HsType Name, FreeVars) -rnHsTyKi isType doc (HsForAllTy Implicit _ lctxt@(L _ ctxt) ty) - = ASSERT ( isType ) do - -- Implicit quantifiction in source code (no kinds on tyvars) - -- Given the signature C => T we universally quantify - -- over FV(T) \ {in-scope-tyvars} +rnHsTyKi isType doc (HsForAllTy Implicit _ lctxt@(L _ ctxt) ty) + = ASSERT( isType ) do + -- Implicit quantifiction in source code (no kinds on tyvars) + -- Given the signature C => T we universally quantify + -- over FV(T) \ {in-scope-tyvars} rdr_env <- getLocalRdrEnv loc <- getSrcSpanM let - (forall_kvs, forall_tvs) = filterInScope rdr_env $ + (forall_kvs, forall_tvs) = filterInScope rdr_env $ extractHsTysRdrTyVars (ty:ctxt) -- In for-all types we don't bring in scope -- kind variables mentioned in kind signatures @@ -139,17 +132,17 @@ rnHsTyKi isType doc (HsForAllTy Implicit _ lctxt@(L _ ctxt) ty) -- f :: Int -> T (a::k) -- Not allowed -- The filterInScope is to ensure that we don't quantify over - -- type variables that are in scope; when GlasgowExts is off, - -- there usually won't be any, except for class signatures: - -- class C a where { op :: a -> a } - tyvar_bndrs = userHsTyVarBndrs loc forall_tvs + -- type variables that are in scope; when GlasgowExts is off, + -- there usually won't be any, except for class signatures: + -- class C a where { op :: a -> a } + tyvar_bndrs = userHsTyVarBndrs loc forall_tvs rnForAll doc Implicit forall_kvs (mkHsQTvs tyvar_bndrs) lctxt ty rnHsTyKi isType doc ty@(HsForAllTy Explicit forall_tyvars lctxt@(L _ ctxt) tau) - = ASSERT ( isType ) do { -- Explicit quantification. - -- Check that the forall'd tyvars are actually - -- mentioned in the type, and produce a warning if not + = ASSERT( isType ) do { -- Explicit quantification. 
+ -- Check that the forall'd tyvars are actually + -- mentioned in the type, and produce a warning if not let (kvs, mentioned) = extractHsTysRdrTyVars (tau:ctxt) in_type_doc = ptext (sLit "In the type") <+> quotes (ppr ty) ; warnUnusedForAlls (in_type_doc $$ docOfHsDocContext doc) forall_tyvars mentioned @@ -164,17 +157,17 @@ rnHsTyKi isType _ (HsTyVar rdr_name) -- a sensible error message, but we don't want to complain about the dot too -- Hence the jiggery pokery with ty1 rnHsTyKi isType doc ty@(HsOpTy ty1 (wrapper, L loc op) ty2) - = ASSERT ( isType ) setSrcSpan loc $ - do { ops_ok <- xoptM Opt_TypeOperators - ; op' <- if ops_ok - then rnTyVar isType op - else do { addErr (opTyErr op ty) - ; return (mkUnboundName op) } -- Avoid double complaint - ; let l_op' = L loc op' - ; fix <- lookupTyFixityRn l_op' - ; (ty1', fvs1) <- rnLHsType doc ty1 - ; (ty2', fvs2) <- rnLHsType doc ty2 - ; res_ty <- mkHsOpTyRn (\t1 t2 -> HsOpTy t1 (wrapper, l_op') t2) + = ASSERT( isType ) setSrcSpan loc $ + do { ops_ok <- xoptM Opt_TypeOperators + ; op' <- if ops_ok + then rnTyVar isType op + else do { addErr (opTyErr op ty) + ; return (mkUnboundName op) } -- Avoid double complaint + ; let l_op' = L loc op' + ; fix <- lookupTyFixityRn l_op' + ; (ty1', fvs1) <- rnLHsType doc ty1 + ; (ty2', fvs2) <- rnLHsType doc ty2 + ; res_ty <- mkHsOpTyRn (\t1 t2 -> HsOpTy t1 (wrapper, l_op') t2) op' fix ty1' ty2' ; return (res_ty, (fvs1 `plusFV` fvs2) `addOneFV` op') } @@ -183,23 +176,24 @@ rnHsTyKi isType doc (HsParTy ty) ; return (HsParTy ty', fvs) } rnHsTyKi isType doc (HsBangTy b ty) - = ASSERT ( isType ) + = ASSERT( isType ) do { (ty', fvs) <- rnLHsType doc ty ; return (HsBangTy b ty', fvs) } -rnHsTyKi isType doc (HsRecTy flds) - = ASSERT ( isType ) - do { (flds', fvs) <- rnConDeclFields doc flds +rnHsTyKi _ doc ty@(HsRecTy flds) + = do { addErr (hang (ptext (sLit "Record syntax is illegal here:")) + 2 (ppr ty)) + ; (flds', fvs) <- rnConDeclFields doc flds ; return (HsRecTy flds', fvs) } rnHsTyKi isType doc (HsFunTy ty1 ty2) = do { (ty1', fvs1) <- rnLHsTyKi isType doc ty1 - -- Might find a for-all as the arg of a function type + -- Might find a for-all as the arg of a function type ; (ty2', fvs2) <- rnLHsTyKi isType doc ty2 - -- Or as the result. This happens when reading Prelude.hi - -- when we find return :: forall m. Monad m -> forall a. a -> m a + -- Or as the result. This happens when reading Prelude.hi + -- when we find return :: forall m. Monad m -> forall a. 
a -> m a - -- Check for fixity rearrangements + -- Check for fixity rearrangements ; res_ty <- if isType then mkHsOpTyRn HsFunTy funTyConName funTyFixity ty1' ty2' else return (HsFunTy ty1' ty2') @@ -212,15 +206,15 @@ rnHsTyKi isType doc listTy@(HsListTy ty) ; return (HsListTy ty', fvs) } rnHsTyKi isType doc (HsKindSig ty k) - = ASSERT ( isType ) + = ASSERT( isType ) do { kind_sigs_ok <- xoptM Opt_KindSignatures ; unless kind_sigs_ok (badSigErr False doc ty) ; (ty', fvs1) <- rnLHsType doc ty ; (k', fvs2) <- rnLHsKind doc k ; return (HsKindSig ty' k', fvs1 `plusFV` fvs2) } -rnHsTyKi isType doc (HsPArrTy ty) - = ASSERT ( isType ) +rnHsTyKi isType doc (HsPArrTy ty) + = ASSERT( isType ) do { (ty', fvs) <- rnLHsType doc ty ; return (HsPArrTy ty', fvs) } @@ -249,19 +243,19 @@ rnHsTyKi isType doc (HsIParamTy n ty) do { (ty', fvs) <- rnLHsType doc ty ; return (HsIParamTy n ty', fvs) } -rnHsTyKi isType doc (HsEqTy ty1 ty2) +rnHsTyKi isType doc (HsEqTy ty1 ty2) = ASSERT( isType ) do { (ty1', fvs1) <- rnLHsType doc ty1 ; (ty2', fvs2) <- rnLHsType doc ty2 ; return (HsEqTy ty1' ty2', fvs1 `plusFV` fvs2) } rnHsTyKi isType _ (HsSpliceTy sp _ k) - = ASSERT ( isType ) - do { (sp', fvs) <- rnSplice sp -- ToDo: deal with fvs + = ASSERT( isType ) + do { (sp', fvs) <- rnSplice sp -- ToDo: deal with fvs ; return (HsSpliceTy sp' fvs k, fvs) } -rnHsTyKi isType doc (HsDocTy ty haddock_doc) - = ASSERT ( isType ) +rnHsTyKi isType doc (HsDocTy ty haddock_doc) + = ASSERT( isType ) do { (ty', fvs) <- rnLHsType doc ty ; haddock_doc' <- rnLHsDoc haddock_doc ; return (HsDocTy ty' haddock_doc', fvs) } @@ -269,19 +263,19 @@ rnHsTyKi isType doc (HsDocTy ty haddock_doc) #ifndef GHCI rnHsTyKi _ _ ty@(HsQuasiQuoteTy _) = pprPanic "Can't do quasiquotation without GHCi" (ppr ty) #else -rnHsTyKi isType doc (HsQuasiQuoteTy qq) - = ASSERT ( isType ) +rnHsTyKi isType doc (HsQuasiQuoteTy qq) + = ASSERT( isType ) do { ty <- runQuasiQuoteType qq ; rnHsType doc (unLoc ty) } #endif -rnHsTyKi isType _ (HsCoreTy ty) - = ASSERT ( isType ) +rnHsTyKi isType _ (HsCoreTy ty) + = ASSERT( isType ) return (HsCoreTy ty, emptyFVs) - -- The emptyFVs probably isn't quite right + -- The emptyFVs probably isn't quite right -- but I don't think it matters -rnHsTyKi _ _ (HsWrapTy {}) +rnHsTyKi _ _ (HsWrapTy {}) = panic "rnHsTyKi" rnHsTyKi isType doc ty@(HsExplicitListTy k tys) @@ -291,7 +285,7 @@ rnHsTyKi isType doc ty@(HsExplicitListTy k tys) ; (tys', fvs) <- rnLHsTypes doc tys ; return (HsExplicitListTy k tys', fvs) } -rnHsTyKi isType doc ty@(HsExplicitTupleTy kis tys) +rnHsTyKi isType doc ty@(HsExplicitTupleTy kis tys) = ASSERT( isType ) do { data_kinds <- xoptM Opt_DataKinds ; unless data_kinds (addErr (dataKindsErr isType ty)) @@ -313,54 +307,54 @@ rnLHsTypes doc tys = mapFvRn (rnLHsType doc) tys \begin{code} -rnForAll :: HsDocContext -> HsExplicitFlag +rnForAll :: HsDocContext -> HsExplicitFlag -> [RdrName] -- Kind variables -> LHsTyVarBndrs RdrName -- Type variables - -> LHsContext RdrName -> LHsType RdrName + -> LHsContext RdrName -> LHsType RdrName -> RnM (HsType Name, FreeVars) rnForAll doc exp kvs forall_tyvars ctxt ty | null kvs, null (hsQTvBndrs forall_tyvars), null (unLoc ctxt) = rnHsType doc (unLoc ty) - -- One reason for this case is that a type like Int# - -- starts off as (HsForAllTy Nothing [] Int), in case - -- there is some quantification. Now that we have quantified - -- and discovered there are no type variables, it's nicer to turn - -- it into plain Int. 
If it were Int# instead of Int, we'd actually - -- get an error, because the body of a genuine for-all is - -- of kind *. + -- One reason for this case is that a type like Int# + -- starts off as (HsForAllTy Nothing [] Int), in case + -- there is some quantification. Now that we have quantified + -- and discovered there are no type variables, it's nicer to turn + -- it into plain Int. If it were Int# instead of Int, we'd actually + -- get an error, because the body of a genuine for-all is + -- of kind *. | otherwise = bindHsTyVars doc Nothing kvs forall_tyvars $ \ new_tyvars -> do { (new_ctxt, fvs1) <- rnContext doc ctxt ; (new_ty, fvs2) <- rnLHsType doc ty ; return (HsForAllTy exp new_tyvars new_ctxt new_ty, fvs1 `plusFV` fvs2) } - -- Retain the same implicit/explicit flag as before - -- so that we can later print it correctly + -- Retain the same implicit/explicit flag as before + -- so that we can later print it correctly --------------- bindSigTyVarsFV :: [Name] - -> RnM (a, FreeVars) - -> RnM (a, FreeVars) + -> RnM (a, FreeVars) + -> RnM (a, FreeVars) -- Used just before renaming the defn of a function -- with a separate type signature, to bring its tyvars into scope -- With no -XScopedTypeVariables, this is a no-op bindSigTyVarsFV tvs thing_inside - = do { scoped_tyvars <- xoptM Opt_ScopedTypeVariables - ; if not scoped_tyvars then - thing_inside - else - bindLocalNamesFV tvs thing_inside } + = do { scoped_tyvars <- xoptM Opt_ScopedTypeVariables + ; if not scoped_tyvars then + thing_inside + else + bindLocalNamesFV tvs thing_inside } --------------- -bindHsTyVars :: HsDocContext +bindHsTyVars :: HsDocContext -> Maybe a -- Just _ => an associated type decl -> [RdrName] -- Kind variables from scope -> LHsTyVarBndrs RdrName -- Type variables -> (LHsTyVarBndrs Name -> RnM (b, FreeVars)) -> RnM (b, FreeVars) --- (a) Bring kind variables into scope --- both (i) passed in (kv_bndrs) +-- (a) Bring kind variables into scope +-- both (i) passed in (kv_bndrs) -- and (ii) mentioned in the kinds of tv_bndrs -- (b) Bring type variables into scope bindHsTyVars doc mb_assoc kv_bndrs tv_bndrs thing_inside @@ -377,26 +371,26 @@ bindHsTyVars doc mb_assoc kv_bndrs tv_bndrs thing_inside -- We disallow this: too confusing! ; poly_kind <- xoptM Opt_PolyKinds - ; unless (poly_kind || null all_kvs) + ; unless (poly_kind || null all_kvs) (addErr (badKindBndrs doc all_kvs)) - ; unless (null overlap_kvs) + ; unless (null overlap_kvs) (addErr (overlappingKindVars doc overlap_kvs)) ; loc <- getSrcSpanM ; kv_names <- mapM (newLocalBndrRn . 
L loc) all_kvs - ; bindLocalNamesFV kv_names $ + ; bindLocalNamesFV kv_names $ do { let tv_names_w_loc = hsLTyVarLocNames tv_bndrs - rn_tv_bndr :: LHsTyVarBndr RdrName -> RnM (LHsTyVarBndr Name, FreeVars) - rn_tv_bndr (L loc (UserTyVar rdr)) - = do { nm <- newTyVarNameRn mb_assoc rdr_env loc rdr - ; return (L loc (UserTyVar nm), emptyFVs) } - rn_tv_bndr (L loc (KindedTyVar rdr kind)) - = do { sig_ok <- xoptM Opt_KindSignatures + rn_tv_bndr :: LHsTyVarBndr RdrName -> RnM (LHsTyVarBndr Name, FreeVars) + rn_tv_bndr (L loc (UserTyVar rdr)) + = do { nm <- newTyVarNameRn mb_assoc rdr_env loc rdr + ; return (L loc (UserTyVar nm), emptyFVs) } + rn_tv_bndr (L loc (KindedTyVar rdr kind)) + = do { sig_ok <- xoptM Opt_KindSignatures ; unless sig_ok (badSigErr False doc kind) ; nm <- newTyVarNameRn mb_assoc rdr_env loc rdr - ; (kind', fvs) <- rnLHsKind doc kind - ; return (L loc (KindedTyVar nm kind'), fvs) } + ; (kind', fvs) <- rnLHsKind doc kind + ; return (L loc (KindedTyVar nm kind'), fvs) } -- Check for duplicate or shadowed tyvar bindrs ; checkDupRdrNames tv_names_w_loc @@ -413,8 +407,8 @@ newTyVarNameRn :: Maybe a -> LocalRdrEnv -> SrcSpan -> RdrName -> RnM Name newTyVarNameRn mb_assoc rdr_env loc rdr | Just _ <- mb_assoc -- Use the same Name as the parent class decl , Just n <- lookupLocalRdrEnv rdr_env rdr - = return n - | otherwise + = return n + | otherwise = newLocalBndrRn (L loc rdr) -------------------------------- @@ -431,16 +425,16 @@ rnHsBndrSig doc (HsWB { hswb_cts = ty@(L loc _) }) thing_inside , not (tv `elemLocalRdrEnv` name_env) ] ; kv_names <- newLocalBndrsRn [L loc kv | kv <- kv_bndrs , not (kv `elemLocalRdrEnv` name_env) ] - ; bindLocalNamesFV kv_names $ - bindLocalNamesFV tv_names $ + ; bindLocalNamesFV kv_names $ + bindLocalNamesFV tv_names $ do { (ty', fvs1) <- rnLHsType doc ty ; (res, fvs2) <- thing_inside (HsWB { hswb_cts = ty', hswb_kvs = kv_names, hswb_tvs = tv_names }) ; return (res, fvs1 `plusFV` fvs2) } } overlappingKindVars :: HsDocContext -> [RdrName] -> SDoc overlappingKindVars doc kvs - = vcat [ ptext (sLit "Kind variable") <> plural kvs <+> - ptext (sLit "also used as type variable") <> plural kvs + = vcat [ ptext (sLit "Kind variable") <> plural kvs <+> + ptext (sLit "also used as type variable") <> plural kvs <> colon <+> pprQuotedList kvs , docOfHsDocContext doc ] @@ -454,7 +448,7 @@ badKindBndrs doc kvs badSigErr :: Bool -> HsDocContext -> LHsType RdrName -> TcM () badSigErr is_type doc (L loc ty) = setSrcSpan loc $ addErr $ - vcat [ hang (ptext (sLit "Illegal") <+> what + vcat [ hang (ptext (sLit "Illegal") <+> what <+> ptext (sLit "signature:") <+> quotes (ppr ty)) 2 (ptext (sLit "Perhaps you intended to use") <+> flag) , docOfHsDocContext doc ] @@ -473,14 +467,14 @@ dataKindsErr is_type thing | otherwise = ptext (sLit "kind") \end{code} -Note [Renaming associated types] +Note [Renaming associated types] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Check that the RHS of the decl mentions only type variables bound on the LHS. For example, this is not ok class C a b where type F a x :: * instance C (p,q) r where - type F (p,q) x = (x, r) -- BAD: mentions 'r' + type F (p,q) x = (x, r) -- BAD: mentions 'r' c.f. Trac #5515 What makes it tricky is that the *kind* variable from the class *are* @@ -488,8 +482,8 @@ in scope (Trac #5862): class Category (x :: k -> k -> *) where type Ob x :: k -> Constraint id :: Ob x a => x a a - (.) :: (Ob x a, Ob x b, Ob x c) => x b c -> x a b -> x a c -Here 'k' is in scope in the kind signature even though it's not + (.) 
:: (Ob x a, Ob x b, Ob x c) => x b c -> x a b -> x a c +Here 'k' is in scope in the kind signature even though it's not explicitly mentioned on the LHS of the type Ob declaration. We could force you to mention k explicitly, thus @@ -499,13 +493,13 @@ but it seems tiresome to do so. %********************************************************* -%* * +%* * \subsection{Contexts and predicates} -%* * +%* * %********************************************************* \begin{code} -rnConDeclFields :: HsDocContext -> [ConDeclField RdrName] +rnConDeclFields :: HsDocContext -> [ConDeclField RdrName] -> RnM ([ConDeclField Name], FreeVars) rnConDeclFields doc fields = mapFvRn (rnField doc) fields @@ -517,16 +511,16 @@ rnField doc (ConDeclField name ty haddock_doc) ; return (ConDeclField new_name new_ty new_haddock_doc, fvs) } rnContext :: HsDocContext -> LHsContext RdrName -> RnM (LHsContext Name, FreeVars) -rnContext doc (L loc cxt) +rnContext doc (L loc cxt) = do { (cxt', fvs) <- rnLHsTypes doc cxt ; return (L loc cxt', fvs) } \end{code} %************************************************************************ -%* * - Fixities and precedence parsing -%* * +%* * + Fixities and precedence parsing +%* * %************************************************************************ @mkOpAppRn@ deals with operator fixities. The argument expressions @@ -539,9 +533,9 @@ operator application. Why? Because the parser parses all operator appications left-associatively, EXCEPT negation, which we need to handle specially. Infix types are read in a *right-associative* way, so that - a `op` b `op` c + a `op` b `op` c is always read in as - a `op` (b `op` c) + a `op` (b `op` c) mkHsOpTyRn rearranges where necessary. The two arguments have already been renamed and rearranged. It's made rather tiresome @@ -551,46 +545,46 @@ by the presence of ->, which is a separate syntactic construct. 
--------------- -- Building (ty1 `op1` (ty21 `op2` ty22)) mkHsOpTyRn :: (LHsType Name -> LHsType Name -> HsType Name) - -> Name -> Fixity -> LHsType Name -> LHsType Name - -> RnM (HsType Name) + -> Name -> Fixity -> LHsType Name -> LHsType Name + -> RnM (HsType Name) mkHsOpTyRn mk1 pp_op1 fix1 ty1 (L loc2 (HsOpTy ty21 (w2, op2) ty22)) = do { fix2 <- lookupTyFixityRn op2 - ; mk_hs_op_ty mk1 pp_op1 fix1 ty1 - (\t1 t2 -> HsOpTy t1 (w2, op2) t2) - (unLoc op2) fix2 ty21 ty22 loc2 } + ; mk_hs_op_ty mk1 pp_op1 fix1 ty1 + (\t1 t2 -> HsOpTy t1 (w2, op2) t2) + (unLoc op2) fix2 ty21 ty22 loc2 } mkHsOpTyRn mk1 pp_op1 fix1 ty1 (L loc2 (HsFunTy ty21 ty22)) - = mk_hs_op_ty mk1 pp_op1 fix1 ty1 - HsFunTy funTyConName funTyFixity ty21 ty22 loc2 + = mk_hs_op_ty mk1 pp_op1 fix1 ty1 + HsFunTy funTyConName funTyFixity ty21 ty22 loc2 -mkHsOpTyRn mk1 _ _ ty1 ty2 -- Default case, no rearrangment +mkHsOpTyRn mk1 _ _ ty1 ty2 -- Default case, no rearrangment = return (mk1 ty1 ty2) --------------- mk_hs_op_ty :: (LHsType Name -> LHsType Name -> HsType Name) - -> Name -> Fixity -> LHsType Name - -> (LHsType Name -> LHsType Name -> HsType Name) - -> Name -> Fixity -> LHsType Name -> LHsType Name -> SrcSpan - -> RnM (HsType Name) -mk_hs_op_ty mk1 op1 fix1 ty1 - mk2 op2 fix2 ty21 ty22 loc2 + -> Name -> Fixity -> LHsType Name + -> (LHsType Name -> LHsType Name -> HsType Name) + -> Name -> Fixity -> LHsType Name -> LHsType Name -> SrcSpan + -> RnM (HsType Name) +mk_hs_op_ty mk1 op1 fix1 ty1 + mk2 op2 fix2 ty21 ty22 loc2 | nofix_error = do { precParseErr (op1,fix1) (op2,fix2) - ; return (mk1 ty1 (L loc2 (mk2 ty21 ty22))) } + ; return (mk1 ty1 (L loc2 (mk2 ty21 ty22))) } | associate_right = return (mk1 ty1 (L loc2 (mk2 ty21 ty22))) - | otherwise = do { -- Rearrange to ((ty1 `op1` ty21) `op2` ty22) - new_ty <- mkHsOpTyRn mk1 op1 fix1 ty1 ty21 - ; return (mk2 (noLoc new_ty) ty22) } + | otherwise = do { -- Rearrange to ((ty1 `op1` ty21) `op2` ty22) + new_ty <- mkHsOpTyRn mk1 op1 fix1 ty1 ty21 + ; return (mk2 (noLoc new_ty) ty22) } where (nofix_error, associate_right) = compareFixity fix1 fix2 --------------------------- -mkOpAppRn :: LHsExpr Name -- Left operand; already rearranged - -> LHsExpr Name -> Fixity -- Operator and fixity - -> LHsExpr Name -- Right operand (not an OpApp, but might - -- be a NegApp) - -> RnM (HsExpr Name) +mkOpAppRn :: LHsExpr Name -- Left operand; already rearranged + -> LHsExpr Name -> Fixity -- Operator and fixity + -> LHsExpr Name -- Right operand (not an OpApp, but might + -- be a NegApp) + -> RnM (HsExpr Name) -- (e11 `op1` e12) `op2` e2 mkOpAppRn e1@(L _ (OpApp e11 op1 fix1 e12)) op2 fix2 e2 @@ -606,13 +600,13 @@ mkOpAppRn e1@(L _ (OpApp e11 op1 fix1 e12)) op2 fix2 e2 (nofix_error, associate_right) = compareFixity fix1 fix2 --------------------------- --- (- neg_arg) `op` e2 +-- (- neg_arg) `op` e2 mkOpAppRn e1@(L _ (NegApp neg_arg neg_name)) op2 fix2 e2 | nofix_error = do precParseErr (negateName,negateFixity) (get_op op2,fix2) return (OpApp e1 op2 fix2 e2) - | associate_right + | associate_right = do new_e <- mkOpAppRn neg_arg op2 fix2 e2 return (NegApp (L loc' new_e) neg_name) where @@ -620,19 +614,19 @@ mkOpAppRn e1@(L _ (NegApp neg_arg neg_name)) op2 fix2 e2 (nofix_error, associate_right) = compareFixity negateFixity fix2 --------------------------- --- e1 `op` - neg_arg -mkOpAppRn e1 op1 fix1 e2@(L _ (NegApp _ _)) -- NegApp can occur on the right - | not associate_right -- We *want* right association +-- e1 `op` - neg_arg +mkOpAppRn e1 op1 fix1 e2@(L _ (NegApp _ _)) -- NegApp can occur on 
the right + | not associate_right -- We *want* right association = do precParseErr (get_op op1, fix1) (negateName, negateFixity) return (OpApp e1 op1 fix1 e2) where (_, associate_right) = compareFixity fix1 negateFixity --------------------------- --- Default case -mkOpAppRn e1 op fix e2 -- Default case, no rearrangment +-- Default case +mkOpAppRn e1 op fix e2 -- Default case, no rearrangment = ASSERT2( right_op_ok fix (unLoc e2), - ppr e1 $$ text "---" $$ ppr op $$ text "---" $$ ppr fix $$ text "---" $$ ppr e2 + ppr e1 $$ text "---" $$ ppr op $$ text "---" $$ ppr fix $$ text "---" $$ ppr e2 ) return (OpApp e1 op fix e2) @@ -641,7 +635,7 @@ get_op :: LHsExpr Name -> Name get_op (L _ (HsVar n)) = n get_op other = pprPanic "get_op" (ppr other) --- Parser left-associates everything, but +-- Parser left-associates everything, but -- derived instances may have correctly-associated things to -- in the right operarand. So we just check that the right operand is OK right_op_ok :: Fixity -> HsExpr Name -> Bool @@ -661,17 +655,17 @@ mkNegAppRn neg_arg neg_name not_op_app :: HsExpr id -> Bool not_op_app (OpApp _ _ _ _) = False -not_op_app _ = True +not_op_app _ = True --------------------------- -mkOpFormRn :: LHsCmdTop Name -- Left operand; already rearranged - -> LHsExpr Name -> Fixity -- Operator and fixity - -> LHsCmdTop Name -- Right operand (not an infix) - -> RnM (HsCmd Name) +mkOpFormRn :: LHsCmdTop Name -- Left operand; already rearranged + -> LHsExpr Name -> Fixity -- Operator and fixity + -> LHsCmdTop Name -- Right operand (not an infix) + -> RnM (HsCmd Name) -- (e11 `op1` e12) `op2` e2 mkOpFormRn a1@(L loc (HsCmdTop (L _ (HsCmdArrForm op1 (Just fix1) [a11,a12])) _ _ _)) - op2 fix2 a2 + op2 fix2 a2 | nofix_error = do precParseErr (get_op op1,fix1) (get_op op2,fix2) return (HsCmdArrForm op2 (Just fix2) [a1, a2]) @@ -679,40 +673,40 @@ mkOpFormRn a1@(L loc (HsCmdTop (L _ (HsCmdArrForm op1 (Just fix1) [a11,a12])) _ | associate_right = do new_c <- mkOpFormRn a12 op2 fix2 a2 return (HsCmdArrForm op1 (Just fix1) - [a11, L loc (HsCmdTop (L loc new_c) placeHolderType placeHolderType [])]) - -- TODO: locs are wrong + [a11, L loc (HsCmdTop (L loc new_c) placeHolderType placeHolderType [])]) + -- TODO: locs are wrong where (nofix_error, associate_right) = compareFixity fix1 fix2 --- Default case -mkOpFormRn arg1 op fix arg2 -- Default case, no rearrangment +-- Default case +mkOpFormRn arg1 op fix arg2 -- Default case, no rearrangment = return (HsCmdArrForm op (Just fix) [arg1, arg2]) -------------------------------------- mkConOpPatRn :: Located Name -> Fixity -> LPat Name -> LPat Name - -> RnM (Pat Name) + -> RnM (Pat Name) mkConOpPatRn op2 fix2 p1@(L loc (ConPatIn op1 (InfixCon p11 p12))) p2 - = do { fix1 <- lookupFixityRn (unLoc op1) - ; let (nofix_error, associate_right) = compareFixity fix1 fix2 + = do { fix1 <- lookupFixityRn (unLoc op1) + ; let (nofix_error, associate_right) = compareFixity fix1 fix2 - ; if nofix_error then do - { precParseErr (unLoc op1,fix1) (unLoc op2,fix2) - ; return (ConPatIn op2 (InfixCon p1 p2)) } + ; if nofix_error then do + { precParseErr (unLoc op1,fix1) (unLoc op2,fix2) + ; return (ConPatIn op2 (InfixCon p1 p2)) } - else if associate_right then do - { new_p <- mkConOpPatRn op2 fix2 p12 p2 - ; return (ConPatIn op1 (InfixCon p11 (L loc new_p))) } -- XXX loc right? - else return (ConPatIn op2 (InfixCon p1 p2)) } + else if associate_right then do + { new_p <- mkConOpPatRn op2 fix2 p12 p2 + ; return (ConPatIn op1 (InfixCon p11 (L loc new_p))) } -- XXX loc right? 
+ else return (ConPatIn op2 (InfixCon p1 p2)) } -mkConOpPatRn op _ p1 p2 -- Default case, no rearrangment +mkConOpPatRn op _ p1 p2 -- Default case, no rearrangment = ASSERT( not_op_pat (unLoc p2) ) return (ConPatIn op (InfixCon p1 p2)) not_op_pat :: Pat Name -> Bool not_op_pat (ConPatIn _ (InfixCon _ _)) = False -not_op_pat _ = True +not_op_pat _ = True -------------------------------------- checkPrecMatch :: Name -> MatchGroup Name body -> RnM () @@ -720,36 +714,36 @@ checkPrecMatch :: Name -> MatchGroup Name body -> RnM () -- eg a `op` b `C` c = ... -- See comments with rnExpr (OpApp ...) about "deriving" -checkPrecMatch op (MG { mg_alts = ms }) - = mapM_ check ms +checkPrecMatch op (MG { mg_alts = ms }) + = mapM_ check ms where check (L _ (Match (L l1 p1 : L l2 p2 :_) _ _)) = setSrcSpan (combineSrcSpans l1 l2) $ do checkPrec op p1 False checkPrec op p2 True - check _ = return () - -- This can happen. Consider - -- a `op` True = ... - -- op = ... - -- The infix flag comes from the first binding of the group - -- but the second eqn has no args (an error, but not discovered - -- until the type checker). So we don't want to crash on the - -- second eqn. + check _ = return () + -- This can happen. Consider + -- a `op` True = ... + -- op = ... + -- The infix flag comes from the first binding of the group + -- but the second eqn has no args (an error, but not discovered + -- until the type checker). So we don't want to crash on the + -- second eqn. checkPrec :: Name -> Pat Name -> Bool -> IOEnv (Env TcGblEnv TcLclEnv) () checkPrec op (ConPatIn op1 (InfixCon _ _)) right = do op_fix@(Fixity op_prec op_dir) <- lookupFixityRn op op1_fix@(Fixity op1_prec op1_dir) <- lookupFixityRn (unLoc op1) let - inf_ok = op1_prec > op_prec || - (op1_prec == op_prec && - (op1_dir == InfixR && op_dir == InfixR && right || - op1_dir == InfixL && op_dir == InfixL && not right)) - - info = (op, op_fix) - info1 = (unLoc op1, op1_fix) - (infol, infor) = if right then (info, info1) else (info1, info) + inf_ok = op1_prec > op_prec || + (op1_prec == op_prec && + (op1_dir == InfixR && op_dir == InfixR && right || + op1_dir == InfixL && op_dir == InfixL && not right)) + + info = (op, op_fix) + info1 = (unLoc op1, op1_fix) + (infol, infor) = if right then (info, info1) else (info1, info) unless inf_ok (precParseErr infol infor) checkPrec _ _ _ @@ -760,56 +754,56 @@ checkPrec _ _ _ -- (a) its precedence must be higher than that of op -- (b) its precedency & associativity must be the same as that of op checkSectionPrec :: FixityDirection -> HsExpr RdrName - -> LHsExpr Name -> LHsExpr Name -> RnM () + -> LHsExpr Name -> LHsExpr Name -> RnM () checkSectionPrec direction section op arg = case unLoc arg of - OpApp _ op fix _ -> go_for_it (get_op op) fix - NegApp _ _ -> go_for_it negateName negateFixity - _ -> return () + OpApp _ op fix _ -> go_for_it (get_op op) fix + NegApp _ _ -> go_for_it negateName negateFixity + _ -> return () where op_name = get_op op go_for_it arg_op arg_fix@(Fixity arg_prec assoc) = do op_fix@(Fixity op_prec _) <- lookupFixityRn op_name - unless (op_prec < arg_prec - || (op_prec == arg_prec && direction == assoc)) - (sectionPrecErr (op_name, op_fix) - (arg_op, arg_fix) section) + unless (op_prec < arg_prec + || (op_prec == arg_prec && direction == assoc)) + (sectionPrecErr (op_name, op_fix) + (arg_op, arg_fix) section) \end{code} Precedence-related error messages \begin{code} precParseErr :: (Name, Fixity) -> (Name, Fixity) -> RnM () -precParseErr op1@(n1,_) op2@(n2,_) +precParseErr op1@(n1,_) op2@(n2,_) | 
isUnboundName n1 || isUnboundName n2 - = return () -- Avoid error cascade + = return () -- Avoid error cascade | otherwise = addErr $ hang (ptext (sLit "Precedence parsing error")) - 4 (hsep [ptext (sLit "cannot mix"), ppr_opfix op1, ptext (sLit "and"), - ppr_opfix op2, - ptext (sLit "in the same infix expression")]) + 4 (hsep [ptext (sLit "cannot mix"), ppr_opfix op1, ptext (sLit "and"), + ppr_opfix op2, + ptext (sLit "in the same infix expression")]) sectionPrecErr :: (Name, Fixity) -> (Name, Fixity) -> HsExpr RdrName -> RnM () sectionPrecErr op@(n1,_) arg_op@(n2,_) section | isUnboundName n1 || isUnboundName n2 - = return () -- Avoid error cascade + = return () -- Avoid error cascade | otherwise = addErr $ vcat [ptext (sLit "The operator") <+> ppr_opfix op <+> ptext (sLit "of a section"), - nest 4 (sep [ptext (sLit "must have lower precedence than that of the operand,"), - nest 2 (ptext (sLit "namely") <+> ppr_opfix arg_op)]), - nest 4 (ptext (sLit "in the section:") <+> quotes (ppr section))] + nest 4 (sep [ptext (sLit "must have lower precedence than that of the operand,"), + nest 2 (ptext (sLit "namely") <+> ppr_opfix arg_op)]), + nest 4 (ptext (sLit "in the section:") <+> quotes (ppr section))] ppr_opfix :: (Name, Fixity) -> SDoc ppr_opfix (op, fixity) = pp_op <+> brackets (ppr fixity) where pp_op | op == negateName = ptext (sLit "prefix `-'") - | otherwise = quotes (ppr op) + | otherwise = quotes (ppr op) \end{code} %********************************************************* -%* * +%* * \subsection{Errors} -%* * +%* * %********************************************************* \begin{code} @@ -821,7 +815,7 @@ warnUnusedForAlls in_doc bound mentioned_rdrs bound_names = hsLTyVarLocNames bound bound_but_not_used = filterOut ((`elem` mentioned_rdrs) . unLoc) bound_names - add_warn (L loc tv) + add_warn (L loc tv) = addWarnAt loc $ vcat [ ptext (sLit "Unused quantified type variable") <+> quotes (ppr tv) , in_doc ] @@ -829,30 +823,30 @@ warnUnusedForAlls in_doc bound mentioned_rdrs opTyErr :: RdrName -> HsType RdrName -> SDoc opTyErr op ty@(HsOpTy ty1 _ _) = hang (ptext (sLit "Illegal operator") <+> quotes (ppr op) <+> ptext (sLit "in type") <+> quotes (ppr ty)) - 2 extra + 2 extra where extra | op == dot_tv_RDR && forall_head ty1 - = perhapsForallMsg - | otherwise - = ptext (sLit "Use -XTypeOperators to allow operators in types") + = perhapsForallMsg + | otherwise + = ptext (sLit "Use -XTypeOperators to allow operators in types") forall_head (L _ (HsTyVar tv)) = tv == forall_tv_RDR forall_head (L _ (HsAppTy ty _)) = forall_head ty - forall_head _other = False + forall_head _other = False opTyErr _ ty = pprPanic "opTyErr: Not an op" (ppr ty) \end{code} %********************************************************* -%* * - Splices -%* * +%* * + Splices +%* * %********************************************************* Note [Splices] ~~~~~~~~~~~~~~ Consider - f = ... - h = ...$(thing "f")... + f = ... + h = ...$(thing "f")... The splice can expand into literally anything, so when we do dependency analysis we must assume that it might mention 'f'. So we simply treat @@ -870,30 +864,30 @@ type checker. Not very satisfactory really. \begin{code} rnSplice :: HsSplice RdrName -> RnM (HsSplice Name, FreeVars) rnSplice (HsSplice n expr) - = do { checkTH expr "splice" - ; loc <- getSrcSpanM - ; n' <- newLocalBndrRn (L loc n) - ; (expr', fvs) <- rnLExpr expr + = do { checkTH expr "splice" + ; loc <- getSrcSpanM + ; n' <- newLocalBndrRn (L loc n) + ; (expr', fvs) <- rnLExpr expr - -- Ugh! 
See Note [Splices] above - ; lcl_rdr <- getLocalRdrEnv - ; gbl_rdr <- getGlobalRdrEnv - ; let gbl_names = mkNameSet [gre_name gre | gre <- globalRdrEnvElts gbl_rdr, - isLocalGRE gre] - lcl_names = mkNameSet (localRdrEnvElts lcl_rdr) + -- Ugh! See Note [Splices] above + ; lcl_rdr <- getLocalRdrEnv + ; gbl_rdr <- getGlobalRdrEnv + ; let gbl_names = mkNameSet [gre_name gre | gre <- globalRdrEnvElts gbl_rdr, + isLocalGRE gre] + lcl_names = mkNameSet (localRdrEnvElts lcl_rdr) - ; return (HsSplice n' expr', fvs `plusFV` lcl_names `plusFV` gbl_names) } + ; return (HsSplice n' expr', fvs `plusFV` lcl_names `plusFV` gbl_names) } checkTH :: Outputable a => a -> String -> RnM () -#ifdef GHCI -checkTH _ _ = return () -- OK +#ifdef GHCI +checkTH _ _ = return () -- OK #else -checkTH e what -- Raise an error in a stage-1 compiler - = addErr (vcat [ptext (sLit "Template Haskell") <+> text what <+> - ptext (sLit "requires GHC with interpreter support"), +checkTH e what -- Raise an error in a stage-1 compiler + = addErr (vcat [ptext (sLit "Template Haskell") <+> text what <+> + ptext (sLit "requires GHC with interpreter support"), ptext (sLit "Perhaps you are using a stage-1 compiler?"), - nest 2 (ppr e)]) -#endif + nest 2 (ppr e)]) +#endif \end{code} %************************************************************************ @@ -924,7 +918,7 @@ recently, kind variables. For example: * type instance F (T (a :: Maybe k)) = ...a...k... Here we want to constrain the kind of 'a', and bind 'k'. -In general we want to walk over a type, and find +In general we want to walk over a type, and find * Its free type variables * The free kind variables of any kind signatures in the type @@ -935,7 +929,7 @@ See also Note [HsBSig binder lists] in HsTypes type FreeKiTyVars = ([RdrName], [RdrName]) filterInScope :: LocalRdrEnv -> FreeKiTyVars -> FreeKiTyVars -filterInScope rdr_env (kvs, tvs) +filterInScope rdr_env (kvs, tvs) = (filterOut in_scope kvs, filterOut in_scope tvs) where in_scope tv = tv `elemLocalRdrEnv` rdr_env @@ -945,13 +939,13 @@ extractHsTyRdrTyVars :: LHsType RdrName -> FreeKiTyVars -- or the free (sort, kind) variables of a HsKind -- It's used when making the for-alls explicit. -- See Note [Kind and type-variable binders] -extractHsTyRdrTyVars ty +extractHsTyRdrTyVars ty = case extract_lty ty ([],[]) of (kvs, tvs) -> (nub kvs, nub tvs) extractHsTysRdrTyVars :: [LHsType RdrName] -> FreeKiTyVars -- See Note [Kind and type-variable binders] -extractHsTysRdrTyVars ty +extractHsTysRdrTyVars ty = case extract_ltys ty ([],[]) of (kvs, tvs) -> (nub kvs, nub tvs) @@ -1023,7 +1017,7 @@ extract_lty (L _ ty) acc extract_hs_tv_bndrs :: LHsTyVarBndrs RdrName -> FreeKiTyVars -> FreeKiTyVars -> FreeKiTyVars -extract_hs_tv_bndrs (HsQTvs { hsq_tvs = tvs }) +extract_hs_tv_bndrs (HsQTvs { hsq_tvs = tvs }) (acc_kvs, acc_tvs) -- Note accumulator comes first (body_kvs, body_tvs) | null tvs diff --git a/compiler/simplCore/CSE.lhs b/compiler/simplCore/CSE.lhs index 8bd15864c738..1d9ef45f7f2a 100644 --- a/compiler/simplCore/CSE.lhs +++ b/compiler/simplCore/CSE.lhs @@ -4,67 +4,42 @@ \section{Common subexpression} \begin{code} -{-# OPTIONS -fno-warn-tabs #-} --- The above warning supression flag is a temporary kludge. --- While working on this module you are encouraged to remove it and --- detab the module (please do the detabbing in a separate patch). 
See --- http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces --- for details - -module CSE ( - cseProgram - ) where +module CSE (cseProgram) where #include "HsVersions.h" --- Note [Keep old CSEnv rep] --- ~~~~~~~~~~~~~~~~~~~~~~~~~ --- Temporarily retain code for the old representation for CSEnv --- Keeping it only so that we can switch back if a bug shows up --- or we want to do some performance comparisions --- --- NB: when you remove this, also delete hashExpr from CoreUtils -#ifdef OLD_CSENV_REP -import CoreUtils ( exprIsBig, hashExpr, eqExpr ) -import StaticFlags ( opt_PprStyle_Debug ) -import Util ( lengthExceeds ) -import UniqFM -import FastString -#else -import TrieMap -#endif - import CoreSubst -import Var ( Var ) -import Id ( Id, idType, idInlineActivation, zapIdOccInfo ) -import CoreUtils ( mkAltExpr +import Var ( Var ) +import Id ( Id, idType, idInlineActivation, zapIdOccInfo ) +import CoreUtils ( mkAltExpr , exprIsTrivial) -import Type ( tyConAppArgs ) +import Type ( tyConAppArgs ) import CoreSyn import Outputable -import BasicTypes ( isAlwaysActive ) +import BasicTypes ( isAlwaysActive ) +import TrieMap import Data.List \end{code} - Simple common sub-expression - ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + Simple common sub-expression + ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ When we see - x1 = C a b - x2 = C x1 b + x1 = C a b + x2 = C x1 b we build up a reverse mapping: C a b -> x1 - C x1 b -> x2 + C x1 b -> x2 and apply that to the rest of the program. When we then see - y1 = C a b - y2 = C y1 b + y1 = C a b + y2 = C y1 b we replace the C a b with x1. But then we *dont* want to add x1 -> y1 to the mapping. Rather, we want the reverse, y1 -> x1 so that a subsequent binding - y2 = C y1 b -will get transformed to C x1 b, and then to x2. + y2 = C y1 b +will get transformed to C x1 b, and then to x2. So we carry an extra var->var substitution which we apply *before* looking up in the reverse mapping. @@ -74,9 +49,9 @@ Note [Shadowing] ~~~~~~~~~~~~~~~~ We have to be careful about shadowing. For example, consider - f = \x -> let y = x+x in - h = \x -> x+x - in ... + f = \x -> let y = x+x in + h = \x -> x+x + in ... Here we must *not* do CSE on the inner x+x! The simplifier used to guarantee no shadowing, but it doesn't any more (it proved too hard), so we clone as we go. @@ -86,9 +61,9 @@ Note [Case binders 1] ~~~~~~~~~~~~~~~~~~~~~~ Consider - f = \x -> case x of wild { - (a:as) -> case a of wild1 { - (p,q) -> ...(wild1:as)... + f = \x -> case x of wild { + (a:as) -> case a of wild1 { + (p,q) -> ...(wild1:as)... Here, (wild1:as) is morally the same as (a:as) and hence equal to wild. But that's not quite obvious. In general we want to keep it as (wild1:as), @@ -101,44 +76,44 @@ to try to replaces uses of 'a' with uses of 'wild1' Note [Case binders 2] ~~~~~~~~~~~~~~~~~~~~~~ Consider - case (h x) of y -> ...(h x)... + case (h x) of y -> ...(h x)... We'd like to replace (h x) in the alternative, by y. But because of the preceding [Note: case binders 1], we only want to add the mapping - scrutinee -> case binder + scrutinee -> case binder to the reverse CSE mapping if the scrutinee is a non-trivial expression. (If the scrutinee is a simple variable we want to add the mapping - case binder -> scrutinee + case binder -> scrutinee to the substitution Note [CSE for INLINE and NOINLINE] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We are careful to do no CSE inside functions that the user has marked as -INLINE or NOINLINE. In terms of Core, that means +INLINE or NOINLINE. 
In terms of Core, that means - a) we do not do CSE inside an InlineRule + a) we do not do CSE inside an InlineRule - b) we do not do CSE on the RHS of a binding b=e - unless b's InlinePragma is AlwaysActive + b) we do not do CSE on the RHS of a binding b=e + unless b's InlinePragma is AlwaysActive Here's why (examples from Roman Leshchinskiy). Consider - yes :: Int - {-# NOINLINE yes #-} - yes = undefined + yes :: Int + {-# NOINLINE yes #-} + yes = undefined - no :: Int - {-# NOINLINE no #-} - no = undefined + no :: Int + {-# NOINLINE no #-} + no = undefined - foo :: Int -> Int -> Int - {-# NOINLINE foo #-} - foo m n = n + foo :: Int -> Int -> Int + {-# NOINLINE foo #-} + foo m n = n - {-# RULES "foo/no" foo no = id #-} + {-# RULES "foo/no" foo no = id #-} - bar :: Int -> Int - bar = foo yes + bar :: Int -> Int + bar = foo yes We do not expect the rule to fire. But if we do CSE, then we get yes=no, and the rule does fire. Worse, whether we get yes=no or @@ -147,26 +122,26 @@ no=yes depends on the order of the definitions. In general, CSE should probably never touch things with INLINE pragmas as this could lead to surprising results. Consider - {-# INLINE foo #-} - foo = + {-# INLINE foo #-} + foo = - {-# NOINLINE bar #-} - bar = -- Same rhs as foo + {-# NOINLINE bar #-} + bar = -- Same rhs as foo If CSE produces - foo = bar + foo = bar then foo will never be inlined (when it should be); but if it produces - bar = foo + bar = foo bar will be inlined (when it should not be). Even if we remove INLINE foo, we'd still like foo to be inlined if rhs is small. This won't happen with foo = bar. Not CSE-ing inside INLINE also solves an annoying bug in CSE. Consider a worker/wrapper, in which the worker has turned into a single variable: - $wf = h - f = \x -> ...$wf... + $wf = h + f = \x -> ...$wf... Now CSE may transform to - f = \x -> ...h... + f = \x -> ...h... But the WorkerInfo for f still says $wf, which is now dead! This won't happen now that we don't look inside INLINEs (which wrappers are). @@ -178,9 +153,9 @@ Then we can CSE the inner (f x) to y. In fact 'case' is like a strict let-binding, and we can use cseRhs for dealing with the scrutinee. 
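To make the reverse mapping and the var->var substitution concrete, here is a small standalone sketch of the scheme described in the notes above. It is illustrative only, not code from this patch: a toy Expr type and Data.Map stand in for CoreExpr and GHC's trie-based CoreMap, and the names (Env, cseBind1, subst, revMap, substM) are invented for the example.

    import qualified Data.Map as Map

    -- Illustrative sketch only; GHC's real CSE keys a trie (CoreMap) on CoreExpr
    data Expr = Var String | App String [Expr]
      deriving (Eq, Ord, Show)

    data Env = Env
      { revMap :: Map.Map Expr String   -- reverse mapping: expr -> binder that already computes it
      , substM :: Map.Map String String -- var -> var substitution, applied *before* the lookup
      }

    emptyEnv :: Env
    emptyEnv = Env Map.empty Map.empty

    -- Process one binding  x = rhs
    cseBind1 :: Env -> (String, Expr) -> (Env, (String, Expr))
    cseBind1 env (x, rhs) =
      case Map.lookup rhs' (revMap env) of
        Just y  -> ( env { substM = Map.insert x y (substM env) }  -- later uses of x become y
                   , (x, Var y) )
        Nothing -> ( env { revMap = Map.insert rhs' x (revMap env) }
                   , (x, rhs') )
      where
        rhs' = subst (substM env) rhs   -- apply the substitution before looking up

    subst :: Map.Map String String -> Expr -> Expr
    subst s (Var v)    = Var (Map.findWithDefault v v s)
    subst s (App c es) = App c (map (subst s) es)

Running the bindings from the note above through cseBind1 in order (x1 = C a b, x2 = C x1 b, y1 = C a b, y2 = C y1 b) rebinds y1 to x1, records y1 -> x1 in the substitution, and so rewrites y2's right-hand side to C x1 b, which then hits the entry for x2.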
%************************************************************************ -%* * +%* * \section{Common subexpression} -%* * +%* * %************************************************************************ \begin{code} @@ -190,12 +165,12 @@ cseProgram binds = cseBinds emptyCSEnv binds cseBinds :: CSEnv -> [CoreBind] -> [CoreBind] cseBinds _ [] = [] cseBinds env (b:bs) = (b':bs') - where - (env1, b') = cseBind env b - bs' = cseBinds env1 bs + where + (env1, b') = cseBind env b + bs' = cseBinds env1 bs cseBind :: CSEnv -> CoreBind -> (CSEnv, CoreBind) -cseBind env (NonRec b e) +cseBind env (NonRec b e) = (env2, NonRec b' e') where (env1, b') = addBinder env b @@ -211,16 +186,16 @@ cseBind env (Rec pairs) cseRhs :: CSEnv -> (OutBndr, InExpr) -> (CSEnv, OutExpr) cseRhs env (id',rhs) = case lookupCSEnv env rhs' of - Just other_expr -> (env, other_expr) - Nothing -> (addCSEnvItem env rhs' (Var id'), rhs') + Just other_expr -> (env, other_expr) + Nothing -> (addCSEnvItem env rhs' (Var id'), rhs') where rhs' | isAlwaysActive (idInlineActivation id') = cseExpr env rhs - | otherwise = rhs - -- See Note [CSE for INLINE and NOINLINE] + | otherwise = rhs + -- See Note [CSE for INLINE and NOINLINE] tryForCSE :: CSEnv -> InExpr -> OutExpr tryForCSE env expr - | exprIsTrivial expr' = expr' -- No point + | exprIsTrivial expr' = expr' -- No point | Just smaller <- lookupCSEnv env expr' = smaller | otherwise = expr' where @@ -230,24 +205,24 @@ cseExpr :: CSEnv -> InExpr -> OutExpr cseExpr env (Type t) = Type (substTy (csEnvSubst env) t) cseExpr env (Coercion c) = Coercion (substCo (csEnvSubst env) c) cseExpr _ (Lit lit) = Lit lit -cseExpr env (Var v) = lookupSubst env v -cseExpr env (App f a) = App (cseExpr env f) (tryForCSE env a) +cseExpr env (Var v) = lookupSubst env v +cseExpr env (App f a) = App (cseExpr env f) (tryForCSE env a) cseExpr env (Tick t e) = Tick t (cseExpr env e) cseExpr env (Cast e co) = Cast (cseExpr env e) (substCo (csEnvSubst env) co) -cseExpr env (Lam b e) = let (env', b') = addBinder env b - in Lam b' (cseExpr env' e) -cseExpr env (Let bind e) = let (env', bind') = cseBind env bind - in Let bind' (cseExpr env' e) +cseExpr env (Lam b e) = let (env', b') = addBinder env b + in Lam b' (cseExpr env' e) +cseExpr env (Let bind e) = let (env', bind') = cseBind env bind + in Let bind' (cseExpr env' e) cseExpr env (Case scrut bndr ty alts) = Case scrut' bndr'' ty alts' - where - alts' = cseAlts env2 scrut' bndr bndr'' alts - (env1, bndr') = addBinder env bndr - bndr'' = zapIdOccInfo bndr' - -- The swizzling from Note [Case binders 2] may - -- cause a dead case binder to be alive, so we - -- play safe here and bring them all to life - (env2, scrut') = cseRhs env1 (bndr'', scrut) - -- Note [CSE for case expressions] + where + alts' = cseAlts env2 scrut' bndr bndr'' alts + (env1, bndr') = addBinder env bndr + bndr'' = zapIdOccInfo bndr' + -- The swizzling from Note [Case binders 2] may + -- cause a dead case binder to be alive, so we + -- play safe here and bring them all to life + (env2, scrut') = cseRhs env1 (bndr'', scrut) + -- Note [CSE for case expressions] cseAlts :: CSEnv -> OutExpr -> InBndr -> InBndr -> [InAlt] -> [OutAlt] @@ -255,103 +230,50 @@ cseAlts env scrut' bndr bndr' alts = map cse_alt alts where (con_target, alt_env) - = case scrut' of - Var v' -> (v', extendCSSubst env bndr v') -- See Note [Case binders 1] - -- map: bndr -> v' + = case scrut' of + Var v' -> (v', extendCSSubst env bndr v') -- See Note [Case binders 1] + -- map: bndr -> v' - _ -> (bndr', extendCSEnv env scrut' (Var 
bndr')) -- See Note [Case binders 2] - -- map: scrut' -> bndr' + _ -> (bndr', extendCSEnv env scrut' (Var bndr')) -- See Note [Case binders 2] + -- map: scrut' -> bndr' arg_tys = tyConAppArgs (idType bndr) cse_alt (DataAlt con, args, rhs) - | not (null args) - -- Don't try CSE if there are no args; it just increases the number - -- of live vars. E.g. - -- case x of { True -> ....True.... } - -- Don't replace True by x! - -- Hence the 'null args', which also deal with literals and DEFAULT - = (DataAlt con, args', tryForCSE new_env rhs) - where - (env', args') = addBinders alt_env args - new_env = extendCSEnv env' (mkAltExpr (DataAlt con) args' arg_tys) - (Var con_target) + | not (null args) + -- Don't try CSE if there are no args; it just increases the number + -- of live vars. E.g. + -- case x of { True -> ....True.... } + -- Don't replace True by x! + -- Hence the 'null args', which also deal with literals and DEFAULT + = (DataAlt con, args', tryForCSE new_env rhs) + where + (env', args') = addBinders alt_env args + new_env = extendCSEnv env' (mkAltExpr (DataAlt con) args' arg_tys) + (Var con_target) cse_alt (con, args, rhs) - = (con, args', tryForCSE env' rhs) - where - (env', args') = addBinders alt_env args + = (con, args', tryForCSE env' rhs) + where + (env', args') = addBinders alt_env args \end{code} %************************************************************************ -%* * +%* * \section{The CSE envt} -%* * +%* * %************************************************************************ \begin{code} -type InExpr = CoreExpr -- Pre-cloning +type InExpr = CoreExpr -- Pre-cloning type InBndr = CoreBndr type InAlt = CoreAlt -type OutExpr = CoreExpr -- Post-cloning +type OutExpr = CoreExpr -- Post-cloning type OutBndr = CoreBndr type OutAlt = CoreAlt --- See Note [Keep old CsEnv rep] -#ifdef OLD_CSENV_REP -data CSEnv = CS { cs_map :: CSEMap - , cs_subst :: Subst } - -type CSEMap = UniqFM [(OutExpr, OutExpr)] -- This is the reverse mapping - -- It maps the hash-code of an expression e to list of (e,e') pairs - -- This means that it's good to replace e by e' - -- INVARIANT: The expr in the range has already been CSE'd - -emptyCSEnv :: CSEnv -emptyCSEnv = CS { cs_map = emptyUFM, cs_subst = emptySubst } - -lookupCSEnv :: CSEnv -> OutExpr -> Maybe OutExpr -lookupCSEnv (CS { cs_map = oldmap, cs_subst = sub}) expr - = case lookupUFM oldmap (hashExpr expr) of - Nothing -> Nothing - Just pairs -> lookup_list pairs - where - in_scope = substInScope sub - - -- In this lookup we use full expression equality - -- Reason: when expressions differ we generally find out quickly - -- but I found that cheapEqExpr was saying (\x.x) /= (\y.y), - -- and this kind of thing happened in real programs - lookup_list :: [(OutExpr,OutExpr)] -> Maybe OutExpr - lookup_list ((e,e'):es) - | eqExpr in_scope e expr = Just e' - | otherwise = lookup_list es - lookup_list [] = Nothing - -addCSEnvItem :: CSEnv -> OutExpr -> OutExpr -> CSEnv -addCSEnvItem env expr expr' | exprIsBig expr = env - | otherwise = extendCSEnv env expr expr' - -- We don't try to CSE big expressions, because they are expensive to compare - -- (and are unlikely to be the same anyway) - -extendCSEnv :: CSEnv -> OutExpr -> OutExpr -> CSEnv -extendCSEnv cse@(CS { cs_map = oldmap }) expr expr' - = cse { cs_map = addToUFM_C combine oldmap hash [(expr, expr')] } - where - hash = hashExpr expr - combine old new - = WARN( result `lengthExceeds` 4, short_msg $$ nest 2 long_msg ) result - where - result = new ++ old - short_msg = ptext (sLit "extendCSEnv: 
long list, length") <+> int (length result) - long_msg | opt_PprStyle_Debug = (text "hash code" <+> text (show hash)) $$ ppr result - | otherwise = empty - -#else ------------- NEW ---------------- - data CSEnv = CS { cs_map :: CoreMap (OutExpr, OutExpr) -- Key, value , cs_subst :: Subst } @@ -359,7 +281,7 @@ emptyCSEnv :: CSEnv emptyCSEnv = CS { cs_map = emptyCoreMap, cs_subst = emptySubst } lookupCSEnv :: CSEnv -> OutExpr -> Maybe OutExpr -lookupCSEnv (CS { cs_map = csmap }) expr +lookupCSEnv (CS { cs_map = csmap }) expr = case lookupCoreMap csmap expr of Just (_,e) -> Just e Nothing -> Nothing @@ -375,7 +297,6 @@ addCSEnvItem = extendCSEnv extendCSEnv :: CSEnv -> OutExpr -> OutExpr -> CSEnv extendCSEnv cse expr expr' = cse { cs_map = extendCoreMap (cs_map cse) expr (expr,expr') } -#endif csEnvSubst :: CSEnv -> Subst csEnvSubst = cs_subst @@ -387,17 +308,17 @@ extendCSSubst :: CSEnv -> Id -> Id -> CSEnv extendCSSubst cse x y = cse { cs_subst = extendIdSubst (cs_subst cse) x (Var y) } addBinder :: CSEnv -> Var -> (CSEnv, Var) -addBinder cse v = (cse { cs_subst = sub' }, v') +addBinder cse v = (cse { cs_subst = sub' }, v') where (sub', v') = substBndr (cs_subst cse) v addBinders :: CSEnv -> [Var] -> (CSEnv, [Var]) -addBinders cse vs = (cse { cs_subst = sub' }, vs') +addBinders cse vs = (cse { cs_subst = sub' }, vs') where (sub', vs') = substBndrs (cs_subst cse) vs addRecBinders :: CSEnv -> [Id] -> (CSEnv, [Id]) -addRecBinders cse vs = (cse { cs_subst = sub' }, vs') +addRecBinders cse vs = (cse { cs_subst = sub' }, vs') where (sub', vs') = substRecBndrs (cs_subst cse) vs \end{code} diff --git a/compiler/simplCore/CoreMonad.lhs b/compiler/simplCore/CoreMonad.lhs index 3afb8cdf5d29..e11c139e8531 100644 --- a/compiler/simplCore/CoreMonad.lhs +++ b/compiler/simplCore/CoreMonad.lhs @@ -762,7 +762,8 @@ instance Monad CoreM where mx >>= f = CoreM $ \s -> do (x, s', w1) <- unCoreM mx s (y, s'', w2) <- unCoreM (f x) s' - return (y, s'', w1 `plusWriter` w2) + let w = w1 `plusWriter` w2 -- forcing w before returning avoids a space leak (Trac #7702) + return $ seq w (y, s'', w) instance Applicative CoreM where pure = return diff --git a/compiler/simplCore/OccurAnal.lhs b/compiler/simplCore/OccurAnal.lhs index 2c2707016653..13e468f68580 100644 --- a/compiler/simplCore/OccurAnal.lhs +++ b/compiler/simplCore/OccurAnal.lhs @@ -31,10 +31,9 @@ import Coercion import VarSet import VarEnv import Var - +import Demand ( argOneShots, argsOneShots ) import Maybes ( orElse ) import Digraph ( SCC(..), stronglyConnCompFromEdgedVerticesR ) -import PrelNames ( buildIdKey, foldrIdKey, runSTRepIdKey, augmentIdKey ) import Unique import UniqFM import Util @@ -138,7 +137,7 @@ occAnalBind env _ imp_rules_edges (NonRec binder rhs) body_usage = (body_usage' +++ rhs_usage4, [NonRec tagged_binder rhs']) where (body_usage', tagged_binder) = tagBinder body_usage binder - (rhs_usage1, rhs') = occAnalRhs env (Just tagged_binder) rhs + (rhs_usage1, rhs') = occAnalNonRecRhs env tagged_binder rhs rhs_usage2 = addIdOccs rhs_usage1 (idUnfoldingVars binder) rhs_usage3 = addIdOccs rhs_usage2 (idRuleVars binder) -- See Note [Rules are extra RHSs] and Note [Rule dependency info] @@ -665,7 +664,7 @@ makeNode env imp_rules_edges bndr_set (bndr, rhs) -- Constructing the edges for the main Rec computation -- See Note [Forming Rec groups] - (rhs_usage1, rhs') = occAnalRhs env Nothing rhs + (rhs_usage1, rhs') = occAnalRecRhs env rhs rhs_usage2 = addIdOccs rhs_usage1 all_rule_fvs -- Note [Rules are extra RHSs] -- Note [Rule dependency info] 
rhs_usage3 = case mb_unf_fvs of @@ -692,7 +691,7 @@ makeNode env imp_rules_edges bndr_set (bndr, rhs) -- Finding the free variables of the INLINE pragma (if any) unf = realIdUnfolding bndr -- Ignore any current loop-breaker flag - mb_unf_fvs = stableUnfoldingVars isLocalId unf + mb_unf_fvs = stableUnfoldingVars unf -- Find the "nd_inl" free vars; for the loop-breaker phase inl_fvs = case mb_unf_fvs of @@ -1065,28 +1064,36 @@ ToDo: try using the occurrence info for the inline'd binder. \begin{code} -occAnalRhs :: OccEnv - -> Maybe Id -> CoreExpr -- Binder and rhs - -- Just b => non-rec, and alrady tagged with occurrence info - -- Nothing => Rec, no occ info +occAnalRecRhs :: OccEnv -> CoreExpr -- Rhs -> (UsageDetails, CoreExpr) -- Returned usage details covers only the RHS, -- and *not* the RULE or INLINE template for the Id -occAnalRhs env mb_bndr rhs - = occAnal ctxt rhs +occAnalRecRhs env rhs = occAnal (rhsCtxt env) rhs + +occAnalNonRecRhs :: OccEnv + -> Id -> CoreExpr -- Binder and rhs + -- Binder is already tagged with occurrence info + -> (UsageDetails, CoreExpr) + -- Returned usage details covers only the RHS, + -- and *not* the RULE or INLINE template for the Id +occAnalNonRecRhs env bndr rhs + = occAnal rhs_env rhs where + -- See Note [Use one-shot info] + env1 = env { occ_one_shots = argOneShots dmd } + -- See Note [Cascading inlines] - ctxt = case mb_bndr of - Just b | certainly_inline b -> env - _other -> rhsCtxt env + rhs_env | certainly_inline = env1 + | otherwise = rhsCtxt env1 - certainly_inline bndr -- See Note [Cascading inlines] + certainly_inline -- See Note [Cascading inlines] = case idOccInfo bndr of OneOcc in_lam one_br _ -> not in_lam && one_br && active && not_stable _ -> False - where - active = isAlwaysActive (idInlineActivation bndr) - not_stable = not (isStableUnfolding (idUnfolding bndr)) + + dmd = idDemandInfo bndr + active = isAlwaysActive (idInlineActivation bndr) + not_stable = not (isStableUnfolding (idUnfolding bndr)) addIdOccs :: UsageDetails -> VarSet -> UsageDetails addIdOccs usage id_set = foldVarSet add usage id_set @@ -1223,24 +1230,13 @@ occAnal env expr@(Lam _ _) (final_usage, tagged_binders) = tagLamBinders body_usage binders' -- Use binders' to put one-shot info on the lambdas - -- URGH! Sept 99: we don't seem to be able to use binders' here, because - -- we get linear-typed things in the resulting program that we can't handle yet. - -- (e.g. 
PrelShow) TODO - - really_final_usage = if linear then - final_usage - else - mapVarEnv markInsideLam final_usage + really_final_usage | linear = final_usage + | otherwise = mapVarEnv markInsideLam final_usage in - (really_final_usage, - mkLams tagged_binders body') } + (really_final_usage, mkLams tagged_binders body') } where - env_body = vanillaCtxt env - -- Body is (no longer) an RhsContext - (binders, body) = collectBinders expr - binders' = oneShotGroup env binders - linear = all is_one_shot binders' - is_one_shot b = isId b && isOneShotBndr b + (binders, body) = collectBinders expr + (env_body, binders', linear) = oneShotGroup env binders occAnal env (Case scrut bndr ty alts) = case occ_anal_scrut scrut alts of { (scrut_usage, scrut') -> @@ -1282,12 +1278,20 @@ occAnal env (Let bind body) case occAnalBind env env emptyVarEnv bind body_usage of { (final_usage, new_binds) -> (final_usage, mkLets new_binds body') }} -occAnalArgs :: OccEnv -> [CoreExpr] -> (UsageDetails, [CoreExpr]) -occAnalArgs env args - = case mapAndUnzip (occAnal arg_env) args of { (arg_uds_s, args') -> - (foldr (+++) emptyDetails arg_uds_s, args')} - where - arg_env = vanillaCtxt env +occAnalArgs :: OccEnv -> [CoreExpr] -> [OneShots] -> (UsageDetails, [CoreExpr]) +occAnalArgs _ [] _ + = (emptyDetails, []) + +occAnalArgs env (arg:args) one_shots + | isTypeArg arg + = case occAnalArgs env args one_shots of { (uds, args') -> + (uds, arg:args') } + + | otherwise + = case argCtxt env one_shots of { (arg_env, one_shots') -> + case occAnal arg_env arg of { (uds1, arg') -> + case occAnalArgs env args one_shots' of { (uds2, args') -> + (uds1 +++ uds2, arg':args') }}} \end{code} Applications are dealt with specially because we want @@ -1324,27 +1328,23 @@ occAnalApp env (Var fun, args) in (fun_uds +++ final_args_uds, mkApps (Var fun) args') } where - fun_uniq = idUnique fun fun_uds = mkOneOcc env fun (valArgCount args > 0) is_exp = isExpandableApp fun (valArgCount args) -- See Note [CONLIKE pragma] in BasicTypes -- The definition of is_exp should match that in -- Simplify.prepareRhs - -- Hack for build, fold, runST - args_stuff | fun_uniq == buildIdKey = appSpecial env 2 [True,True] args - | fun_uniq == augmentIdKey = appSpecial env 2 [True,True] args - | fun_uniq == foldrIdKey = appSpecial env 3 [False,True] args - | fun_uniq == runSTRepIdKey = appSpecial env 2 [True] args + one_shots = argsOneShots (idStrictness fun) (valArgCount args) + -- See Note [Use one-shot info] + + args_stuff = occAnalArgs env args one_shots + -- (foldr k z xs) may call k many times, but it never -- shares a partial application of k; hence [False,True] -- This means we can optimise -- foldr (\x -> let v = ...x... in \y -> ...v...) z xs -- by floating in the v - | otherwise = occAnalArgs env args - - occAnalApp env (fun, args) = case occAnal (addAppCtxt env args) fun of { (fun_uds, fun') -> -- The addAppCtxt is a bit cunning. One iteration of the simplifier @@ -1354,11 +1354,8 @@ occAnalApp env (fun, args) -- thing much like a let. We do this by pushing some True items -- onto the context stack. 
- case occAnalArgs env args of { (args_uds, args') -> - let - final_uds = fun_uds +++ args_uds - in - (final_uds, mkApps fun' args') }} + case occAnalArgs env args [] of { (args_uds, args') -> + (fun_uds +++ args_uds, mkApps fun' args') }} markManyIf :: Bool -- If this is true @@ -1366,29 +1363,23 @@ markManyIf :: Bool -- If this is true -> UsageDetails markManyIf True uds = mapVarEnv markMany uds markManyIf False uds = uds +\end{code} -appSpecial :: OccEnv - -> Int -> CtxtTy -- Argument number, and context to use for it - -> [CoreExpr] - -> (UsageDetails, [CoreExpr]) -appSpecial env n ctxt args - = go n args - where - arg_env = vanillaCtxt env - - go _ [] = (emptyDetails, []) -- Too few args - - go 1 (arg:args) -- The magic arg - = case occAnal (setCtxtTy arg_env ctxt) arg of { (arg_uds, arg') -> - case occAnalArgs env args of { (args_uds, args') -> - (arg_uds +++ args_uds, arg':args') }} +Note [Use one-shot information] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +The occurrrence analyser propagates one-shot-lambda information in two situation + * Applications: eg build (\cn -> blah) + Propagate one-shot info from the strictness signature of 'build' to + the \cn - go n (arg:args) - = case occAnal arg_env arg of { (arg_uds, arg') -> - case go (n-1) args of { (args_uds, args') -> - (arg_uds +++ args_uds, arg':args') }} -\end{code} + * Let-bindings: eg let f = \c. let ... in \n -> blah + in (build f, build f) + Propagate one-shot info from the demanand-info on 'f' to the + lambdas in its RHS (which may not be syntactically at the top) +Some of this is done by the demand analyser, but this way it happens +much earlier, taking advantage of the strictness signature of +imported functions. Note [Binders in case alternatives] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -1441,10 +1432,10 @@ wrapProxy _ _ _ body_usg body \begin{code} data OccEnv - = OccEnv { occ_encl :: !OccEncl -- Enclosing context information - , occ_ctxt :: !CtxtTy -- Tells about linearity - , occ_gbl_scrut :: GlobalScruts - , occ_rule_act :: Activation -> Bool -- Which rules are active + = OccEnv { occ_encl :: !OccEncl -- Enclosing context information + , occ_one_shots :: !OneShots -- Tells about linearity + , occ_gbl_scrut :: GlobalScruts + , occ_rule_act :: Activation -> Bool -- Which rules are active -- See Note [Finding rule RHS free vars] , occ_binder_swap :: !Bool -- enable the binder_swap -- See CorePrep Note [Dead code in CorePrep] @@ -1471,7 +1462,7 @@ instance Outputable OccEncl where ppr OccRhs = ptext (sLit "occRhs") ppr OccVanilla = ptext (sLit "occVanilla") -type CtxtTy = [Bool] +type OneShots = [Bool] -- [] No info -- -- True:ctxt Analysing a function-valued expression that will be @@ -1479,51 +1470,66 @@ type CtxtTy = [Bool] -- -- False:ctxt Analysing a function-valued expression that may -- be applied many times; but when it is, - -- the CtxtTy inside applies + -- the OneShots inside applies initOccEnv :: (Activation -> Bool) -> OccEnv initOccEnv active_rule - = OccEnv { occ_encl = OccVanilla - , occ_ctxt = [] + = OccEnv { occ_encl = OccVanilla + , occ_one_shots = [] , occ_gbl_scrut = emptyVarSet -- PE emptyVarEnv emptyVarSet - , occ_rule_act = active_rule + , occ_rule_act = active_rule , occ_binder_swap = True } vanillaCtxt :: OccEnv -> OccEnv -vanillaCtxt env = env { occ_encl = OccVanilla, occ_ctxt = [] } +vanillaCtxt env = env { occ_encl = OccVanilla, occ_one_shots = [] } rhsCtxt :: OccEnv -> OccEnv -rhsCtxt env = env { occ_encl = OccRhs, occ_ctxt = [] } +rhsCtxt env = env { occ_encl = OccRhs, occ_one_shots = [] } 
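-- Example of the new one-shot plumbing (illustrative only, not from this patch):
-- for an application
--     build (\c n -> blah)
-- argsOneShots applied to build's strictness signature gives one-shot info for
-- build's functional argument, roughly [True,True]; occAnalArgs passes that to
-- argCtxt, so the lambda is analysed with occ_one_shots = [True,True], and
-- oneShotGroup then marks both c and n as one-shot.  This is the same effect
-- that the removed buildIdKey/foldrIdKey special cases achieved by hand.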
-setCtxtTy :: OccEnv -> CtxtTy -> OccEnv -setCtxtTy env ctxt = env { occ_ctxt = ctxt } +argCtxt :: OccEnv -> [OneShots] -> (OccEnv, [OneShots]) +argCtxt env [] + = (env { occ_encl = OccVanilla, occ_one_shots = [] }, []) +argCtxt env (one_shots:one_shots_s) + = (env { occ_encl = OccVanilla, occ_one_shots = one_shots }, one_shots_s) isRhsEnv :: OccEnv -> Bool isRhsEnv (OccEnv { occ_encl = OccRhs }) = True isRhsEnv (OccEnv { occ_encl = OccVanilla }) = False -oneShotGroup :: OccEnv -> [CoreBndr] -> [CoreBndr] +oneShotGroup :: OccEnv -> [CoreBndr] + -> ( OccEnv + , [CoreBndr] + , Bool ) -- True <=> all binders are one-shot -- The result binders have one-shot-ness set that they might not have had originally. -- This happens in (build (\cn -> e)). Here the occurrence analyser -- linearity context knows that c,n are one-shot, and it records that fact in -- the binder. This is useful to guide subsequent float-in/float-out tranformations -oneShotGroup (OccEnv { occ_ctxt = ctxt }) bndrs - = go ctxt bndrs [] +oneShotGroup env@(OccEnv { occ_one_shots = ctxt }) bndrs + = go ctxt bndrs [] True where - go _ [] rev_bndrs = reverse rev_bndrs - - go (lin_ctxt:ctxt) (bndr:bndrs) rev_bndrs - | isId bndr = go ctxt bndrs (bndr':rev_bndrs) - where - bndr' | lin_ctxt = setOneShotLambda bndr - | otherwise = bndr - - go ctxt (bndr:bndrs) rev_bndrs = go ctxt bndrs (bndr:rev_bndrs) + go ctxt [] rev_bndrs linear + = ( env { occ_one_shots = ctxt, occ_encl = OccVanilla } + , reverse rev_bndrs + , linear ) + + go ctxt (bndr:bndrs) rev_bndrs lin_acc + | isId bndr + = case ctxt of + [] -> go [] bndrs (bndr:rev_bndrs) (lin_acc && one_shot) + (linear : ctxt) + | one_shot -> go ctxt bndrs (bndr : rev_bndrs) lin_acc + | linear -> go ctxt bndrs (bndr': rev_bndrs) lin_acc + | otherwise -> go ctxt bndrs (bndr : rev_bndrs) False + | otherwise + = go ctxt bndrs (bndr:rev_bndrs) lin_acc + where + one_shot = isOneShotBndr bndr + bndr' = setOneShotLambda bndr addAppCtxt :: OccEnv -> [Arg CoreBndr] -> OccEnv -addAppCtxt env@(OccEnv { occ_ctxt = ctxt }) args - = env { occ_ctxt = replicate (valArgCount args) True ++ ctxt } +addAppCtxt env@(OccEnv { occ_one_shots = ctxt }) args + = env { occ_one_shots = replicate (valArgCount args) True ++ ctxt } \end{code} @@ -1717,7 +1723,7 @@ information right. 
\begin{code} mkAltEnv :: OccEnv -> CoreExpr -> Id -> (OccEnv, Maybe (Id, CoreExpr)) --- Does two things: a) makes the occ_ctxt = OccVanilla +-- Does two things: a) makes the occ_one_shots = OccVanilla -- b) extends the GlobalScruts if possible -- c) returns a proxy mapping, binding the scrutinee -- to the case binder, if possible diff --git a/compiler/simplCore/SimplUtils.lhs b/compiler/simplCore/SimplUtils.lhs index 7bc10de43fcf..92874de4a3d5 100644 --- a/compiler/simplCore/SimplUtils.lhs +++ b/compiler/simplCore/SimplUtils.lhs @@ -15,15 +15,17 @@ module SimplUtils ( simplEnvForGHCi, updModeForInlineRules, -- The continuation type - SimplCont(..), DupFlag(..), ArgInfo(..), + SimplCont(..), DupFlag(..), isSimplified, contIsDupable, contResultType, contInputType, contIsTrivial, contArgs, dropArgs, - pushSimplifiedArgs, countValArgs, countArgs, addArgTo, + pushSimplifiedArgs, countValArgs, countArgs, mkBoringStop, mkRhsStop, mkLazyArgStop, contIsRhsOrArg, - interestingCallContext, + interestingCallContext, interestingArg, - interestingArg, mkArgInfo, + -- ArgInfo + ArgInfo(..), ArgSpec(..), mkArgInfo, addArgTo, addCastTo, + argInfoExpr, argInfoValArgs, abstractFloats ) where @@ -132,7 +134,7 @@ data SimplCont data ArgInfo = ArgInfo { ai_fun :: OutId, -- The function - ai_args :: [OutExpr], -- ...applied to these args (which are in *reverse* order) + ai_args :: [ArgSpec], -- ...applied to these args (which are in *reverse* order) ai_type :: OutType, -- Type of (f a1 ... an) ai_rules :: [CoreRule], -- Rules for this function @@ -149,10 +151,38 @@ data ArgInfo -- Always infinite } +data ArgSpec = ValArg OutExpr -- Apply to this + | CastBy OutCoercion -- Cast by this + +instance Outputable ArgSpec where + ppr (ValArg e) = ptext (sLit "ValArg") <+> ppr e + ppr (CastBy c) = ptext (sLit "CastBy") <+> ppr c + addArgTo :: ArgInfo -> OutExpr -> ArgInfo -addArgTo ai arg = ai { ai_args = arg : ai_args ai +addArgTo ai arg = ai { ai_args = ValArg arg : ai_args ai , ai_type = applyTypeToArg (ai_type ai) arg } +addCastTo :: ArgInfo -> OutCoercion -> ArgInfo +addCastTo ai co = ai { ai_args = CastBy co : ai_args ai + , ai_type = pSnd (coercionKind co) } + +argInfoValArgs :: SimplEnv -> [ArgSpec] -> SimplCont -> ([OutExpr], SimplCont) +argInfoValArgs env args cont + = go args [] cont + where + go :: [ArgSpec] -> [OutExpr] -> SimplCont -> ([OutExpr], SimplCont) + go (ValArg e : as) acc cont = go as (e:acc) cont + go (CastBy co : as) acc cont = go as [] (CoerceIt co (pushSimplifiedArgs env acc cont)) + go [] acc cont = (acc, cont) + +argInfoExpr :: OutId -> [ArgSpec] -> OutExpr +argInfoExpr fun args + = go args + where + go [] = Var fun + go (ValArg a : as) = go as `App` a + go (CastBy co : as) = mkCast (go as) co + instance Outputable SimplCont where ppr (Stop ty interesting) = ptext (sLit "Stop") <> brackets (ppr interesting) <+> ppr ty ppr (ApplyTo dup arg _ cont) = ((ptext (sLit "ApplyTo") <+> ppr dup <+> pprParendExpr arg) @@ -258,21 +288,27 @@ countArgs (ApplyTo _ _ _ cont) = 1 + countArgs cont countArgs _ = 0 contArgs :: SimplCont -> (Bool, [ArgSummary], SimplCont) --- Uses substitution to turn each arg into an OutExpr -contArgs cont@(ApplyTo {}) - = case go [] cont of { (args, cont') -> (False, args, cont') } +-- Summarises value args, discards type args and coercions +-- The returned continuation of the call is only used to +-- answer questions like "are you interesting?" 
+contArgs cont + | lone cont = (True, [], cont) + | otherwise = go [] cont where + lone (ApplyTo {}) = False -- See Note [Lone variables] in CoreUnfold + lone (CoerceIt {}) = False + lone _ = True + go args (ApplyTo _ arg se cont) - | isTypeArg arg = go args cont - | otherwise = go (is_interesting arg se : args) cont - go args cont = (reverse args, cont) + | isTypeArg arg = go args cont + | otherwise = go (is_interesting arg se : args) cont + go args (CoerceIt _ cont) = go args cont + go args cont = (False, reverse args, cont) is_interesting arg se = interestingArg (substExpr (text "contArgs") se arg) -- Do *not* use short-cutting substitution here -- because we want to get as much IdInfo as possible -contArgs cont = (True, [], cont) - pushSimplifiedArgs :: SimplEnv -> [CoreExpr] -> SimplCont -> SimplCont pushSimplifiedArgs _env [] cont = cont pushSimplifiedArgs env (arg:args) cont = ApplyTo Simplified arg env (pushSimplifiedArgs env args cont) @@ -641,19 +677,21 @@ activeUnfolding env where mode = getMode env -getUnfoldingInRuleMatch :: SimplEnv -> IdUnfoldingFun +getUnfoldingInRuleMatch :: SimplEnv -> InScopeEnv -- When matching in RULE, we want to "look through" an unfolding -- (to see a constructor) if *rules* are on, even if *inlinings* -- are not. A notable example is DFuns, which really we want to -- match in rules like (op dfun) in gentle mode. Another example -- is 'otherwise' which we want exprIsConApp_maybe to be able to -- see very early on -getUnfoldingInRuleMatch env id - | unf_is_active = idUnfolding id - | otherwise = NoUnfolding +getUnfoldingInRuleMatch env + = (in_scope, id_unf) where + in_scope = seInScope env mode = getMode env - unf_is_active + id_unf id | unf_is_active id = idUnfolding id + | otherwise = NoUnfolding + unf_is_active id | not (sm_rules mode) = active_unfolding_minimal id | otherwise = isActive (sm_phase mode) (idInlineActivation id) @@ -1124,6 +1162,7 @@ because the latter is not well-kinded. \begin{code} tryEtaExpand :: SimplEnv -> OutId -> OutExpr -> SimplM (Arity, OutExpr) -- See Note [Eta-expanding at let bindings] +-- and Note [Eta expansion to manifest arity] tryEtaExpand env bndr rhs = do { dflags <- getDynFlags ; (new_arity, new_rhs) <- try_expand dflags diff --git a/compiler/simplCore/Simplify.lhs b/compiler/simplCore/Simplify.lhs index d30e826f9337..f0f894d744f1 100644 --- a/compiler/simplCore/Simplify.lhs +++ b/compiler/simplCore/Simplify.lhs @@ -33,7 +33,6 @@ import CoreUtils import qualified CoreSubst import CoreArity import Rules ( lookupRule, getRules ) -import BasicTypes ( Arity ) import TysPrim ( realWorldStatePrimTy ) import BasicTypes ( TopLevelFlag(..), isTopLevel, RecFlag(..) 
) import MonadUtils ( foldlM, mapAccumLM, liftIO ) @@ -537,6 +536,11 @@ These strange casts can happen as a result of case-of-case \begin{code} +makeTrivialArg :: SimplEnv -> ArgSpec -> SimplM (SimplEnv, ArgSpec) +makeTrivialArg env (ValArg e) = do { (env', e') <- makeTrivial NotTopLevel env e + ; return (env', ValArg e') } +makeTrivialArg env (CastBy co) = return (env, CastBy co) + makeTrivial :: TopLevelFlag -> SimplEnv -> OutExpr -> SimplM (SimplEnv, OutExpr) -- Binds the expression to a variable, if it's not trivial, returning the variable makeTrivial top_lvl env expr = makeTrivialWithInfo top_lvl env vanillaIdInfo expr @@ -723,10 +727,10 @@ simplUnfolding :: SimplEnv-> TopLevelFlag -> OutExpr -> Unfolding -> SimplM Unfolding -- Note [Setting the new unfolding] -simplUnfolding env _ _ _ (DFunUnfolding ar con ops) - = return (DFunUnfolding ar con ops') - where - ops' = map (fmap (substExpr (text "simplUnfolding") env)) ops +simplUnfolding env _ _ _ df@(DFunUnfolding { df_bndrs = bndrs, df_args = args }) + = do { (env', bndrs') <- simplBinders env bndrs + ; args' <- mapM (simplExpr env') args + ; return (df { df_bndrs = bndrs', df_args = args' }) } simplUnfolding env top_lvl id _ (CoreUnfolding { uf_tmpl = expr, uf_arity = arity @@ -1394,12 +1398,6 @@ completeCall env var cont = do { ------------- Try inlining ---------------- dflags <- getDynFlags ; let (lone_variable, arg_infos, call_cont) = contArgs cont - -- The args are OutExprs, obtained by *lazily* substituting - -- in the args found in cont. These args are only examined - -- to limited depth (unless a rule fires). But we must do - -- the substitution; rule matching on un-simplified args would - -- be bogus - n_val_args = length arg_infos interesting_cont = interestingCallContext call_cont unfolding = activeUnfolding env var @@ -1448,9 +1446,12 @@ rebuildCall env (ArgInfo { ai_fun = fun, ai_args = rev_args, ai_strs = [] }) con | not (contIsTrivial cont) -- Only do this if there is a non-trivial = return (env, castBottomExpr res cont_ty) -- contination to discard, else we do it where -- again and again! 
- res = mkApps (Var fun) (reverse rev_args) + res = argInfoExpr fun rev_args cont_ty = contResultType cont +rebuildCall env info (CoerceIt co cont) + = rebuildCall env (addCastTo info co) cont + rebuildCall env info (ApplyTo dup_flag (Type arg_ty) se cont) = do { arg_ty' <- if isSimplified dup_flag then return arg_ty else simplType (se `setInScope` env) arg_ty @@ -1482,17 +1483,21 @@ rebuildCall env info@(ArgInfo { ai_encl = encl_rules, ai_type = fun_ty | otherwise = BoringCtxt -- Nothing interesting rebuildCall env (ArgInfo { ai_fun = fun, ai_args = rev_args, ai_rules = rules }) cont + | null rules + = rebuild env (argInfoExpr fun rev_args) cont -- No rules, common case + + | otherwise = do { -- We've accumulated a simplified call in -- so try rewrite rules; see Note [RULEs apply to simplified arguments] -- See also Note [Rules for recursive functions] - ; let args = reverse rev_args - env' = zapSubstEnv env - ; mb_rule <- tryRules env rules fun args cont + ; let env' = zapSubstEnv env + (args, cont') = argInfoValArgs env' rev_args cont + ; mb_rule <- tryRules env' rules fun args cont' ; case mb_rule of { - Just (n_args, rule_rhs) -> simplExprF env' rule_rhs $ - pushSimplifiedArgs env' (drop n_args args) cont ; - -- n_args says how many args the rule consumed - ; Nothing -> rebuild env (mkApps (Var fun) args) cont -- No rules + Just (rule_rhs, cont'') -> simplExprF env' rule_rhs cont'' + + -- Rules don't match + ; Nothing -> rebuild env (argInfoExpr fun rev_args) cont -- No rules } } \end{code} @@ -1552,22 +1557,25 @@ all this at once is TOO HARD! \begin{code} tryRules :: SimplEnv -> [CoreRule] -> Id -> [OutExpr] -> SimplCont - -> SimplM (Maybe (Arity, CoreExpr)) -- The arity is the number of - -- args consumed by the rule + -> SimplM (Maybe (CoreExpr, SimplCont)) +-- The SimplEnv already has zapSubstEnv applied to it + tryRules env rules fn args call_cont | null rules = return Nothing | otherwise = do { dflags <- getDynFlags - ; case lookupRule dflags (activeRule env) (getUnfoldingInRuleMatch env) - (getInScope env) fn args rules of { + ; case lookupRule dflags (getUnfoldingInRuleMatch env) (activeRule env) + fn args rules of { Nothing -> return Nothing ; -- No rule matches Just (rule, rule_rhs) -> - do { checkedTick (RuleFired (ru_name rule)) - ; dflags <- getDynFlags ; dump dflags rule rule_rhs - ; return (Just (ruleArity rule, rule_rhs)) }}} + ; let cont' = pushSimplifiedArgs env + (drop (ruleArity rule) args) + call_cont + -- (ruleArity rule) says how many args the rule consumed + ; return (Just (rule_rhs, cont')) }}} where dump dflags rule rule_rhs | dopt Opt_D_dump_rule_rewrites dflags @@ -1586,7 +1594,6 @@ tryRules env rules fn args call_cont log_rule dflags flag hdr details = liftIO . 
dumpSDoc dflags flag "" $ sep [text hdr, nest 4 details] - \end{code} Note [Rules for recursive functions] @@ -1858,17 +1865,16 @@ rebuildCase env scrut case_bndr [(_, bndrs, rhs)] cont rebuildCase env scrut case_bndr alts@[(_, bndrs, rhs)] cont | all isDeadBinder (case_bndr : bndrs) -- So this is just 'seq' = do { let rhs' = substExpr (text "rebuild-case") env rhs + env' = zapSubstEnv env out_args = [Type (substTy env (idType case_bndr)), Type (exprType rhs'), scrut, rhs'] -- Lazily evaluated, so we don't do most of this ; rule_base <- getSimplRules - ; mb_rule <- tryRules env (getRules rule_base seqId) seqId out_args cont + ; mb_rule <- tryRules env' (getRules rule_base seqId) seqId out_args cont ; case mb_rule of - Just (n_args, res) -> simplExprF (zapSubstEnv env) - (mkApps res (drop n_args out_args)) - cont - Nothing -> reallyRebuildCase env scrut case_bndr alts cont } + Just (rule_rhs, cont') -> simplExprF env' rule_rhs cont' + Nothing -> reallyRebuildCase env scrut case_bndr alts cont } rebuildCase env scrut case_bndr alts cont = reallyRebuildCase env scrut case_bndr alts cont @@ -2315,7 +2321,7 @@ mkDupableCont env cont@(StrictBind {}) mkDupableCont env (StrictArg info cci cont) -- See Note [Duplicating StrictArg] = do { (env', dup, nodup) <- mkDupableCont env cont - ; (env'', args') <- mapAccumLM (makeTrivial NotTopLevel) env' (ai_args info) + ; (env'', args') <- mapAccumLM makeTrivialArg env' (ai_args info) ; return (env'', StrictArg (info { ai_args = args' }) cci dup, nodup) } mkDupableCont env (ApplyTo _ arg se cont) diff --git a/compiler/specialise/Rules.lhs b/compiler/specialise/Rules.lhs index 9c473e5a3aa9..cc2cc19f9860 100644 --- a/compiler/specialise/Rules.lhs +++ b/compiler/specialise/Rules.lhs @@ -47,8 +47,8 @@ import Name ( Name, NamedThing(..) ) import NameEnv import Unify ( ruleMatchTyX, MatchEnv(..) ) import BasicTypes ( Activation, CompilerPhase, isActive ) -import DynFlags ( DynFlags ) import StaticFlags ( opt_PprStyle_Debug ) +import DynFlags ( DynFlags ) import Outputable import FastString import Maybes @@ -351,16 +351,14 @@ pprRuleBase rules = vcat [ pprRules (tidyRules emptyTidyEnv rs) -- supplied rules to this instance of an application in a given -- context, returning the rule applied and the resulting expression if -- successful. 
-lookupRule :: DynFlags - -> (Activation -> Bool) -- When rule is active - -> IdUnfoldingFun -- When Id can be unfolded - -> InScopeSet - -> Id -> [CoreExpr] - -> [CoreRule] -> Maybe (CoreRule, CoreExpr) +lookupRule :: DynFlags -> InScopeEnv + -> (Activation -> Bool) -- When rule is active + -> Id -> [CoreExpr] + -> [CoreRule] -> Maybe (CoreRule, CoreExpr) -- See Note [Extra args in rule matching] -- See comments on matchRule -lookupRule dflags is_active id_unf in_scope fn args rules +lookupRule dflags in_scope is_active fn args rules = -- pprTrace "matchRules" (ppr fn <+> ppr args $$ ppr rules ) $ case go [] rules of [] -> Nothing @@ -370,7 +368,7 @@ lookupRule dflags is_active id_unf in_scope fn args rules go :: [(CoreRule,CoreExpr)] -> [CoreRule] -> [(CoreRule,CoreExpr)] go ms [] = ms - go ms (r:rs) = case (matchRule dflags fn is_active id_unf in_scope args rough_args r) of + go ms (r:rs) = case (matchRule dflags in_scope is_active fn args rough_args r) of Just e -> go ((r,e):ms) rs Nothing -> -- pprTrace "match failed" (ppr r $$ ppr args $$ -- ppr [ (arg_id, unfoldingTemplate unf) @@ -418,7 +416,7 @@ isMoreSpecific (BuiltinRule {}) _ = False isMoreSpecific (Rule {}) (BuiltinRule {}) = True isMoreSpecific (Rule { ru_bndrs = bndrs1, ru_args = args1 }) (Rule { ru_bndrs = bndrs2, ru_args = args2 }) - = isJust (matchN id_unfolding_fun in_scope bndrs2 args2 args1) + = isJust (matchN (in_scope, id_unfolding_fun) bndrs2 args2 args1) where id_unfolding_fun _ = NoUnfolding -- Don't expand in templates in_scope = mkInScopeSet (mkVarSet bndrs1) @@ -447,9 +445,8 @@ to lookupRule are the result of a lazy substitution \begin{code} ------------------------------------ -matchRule :: DynFlags -> Id -> (Activation -> Bool) -> IdUnfoldingFun - -> InScopeSet - -> [CoreExpr] -> [Maybe Name] +matchRule :: DynFlags -> InScopeEnv -> (Activation -> Bool) + -> Id -> [CoreExpr] -> [Maybe Name] -> CoreRule -> Maybe CoreExpr -- If (matchRule rule args) returns Just (name,rhs) @@ -474,21 +471,21 @@ matchRule :: DynFlags -> Id -> (Activation -> Bool) -> IdUnfoldingFun -- Any 'surplus' arguments in the input are simply put on the end -- of the output. 
-matchRule dflags fn _is_active id_unf _in_scope args _rough_args +matchRule dflags rule_env _is_active fn args _rough_args (BuiltinRule { ru_try = match_fn }) -- Built-in rules can't be switched off, it seems - = case match_fn dflags fn id_unf args of + = case match_fn dflags rule_env fn args of Just expr -> Just expr Nothing -> Nothing -matchRule _ _ is_active id_unf in_scope args rough_args - (Rule { ru_act = act, ru_rough = tpl_tops, - ru_bndrs = tpl_vars, ru_args = tpl_args, - ru_rhs = rhs }) +matchRule _ in_scope is_active _ args rough_args + (Rule { ru_act = act, ru_rough = tpl_tops + , ru_bndrs = tpl_vars, ru_args = tpl_args + , ru_rhs = rhs }) | not (is_active act) = Nothing | ruleCantMatch tpl_tops rough_args = Nothing | otherwise - = case matchN id_unf in_scope tpl_vars tpl_args args of + = case matchN in_scope tpl_vars tpl_args args of Nothing -> Nothing Just (bind_wrapper, tpl_vals) -> Just (bind_wrapper $ rule_fn `mkApps` tpl_vals) @@ -497,8 +494,7 @@ matchRule _ _ is_active id_unf in_scope args rough_args -- We could do this when putting things into the rulebase, I guess --------------------------------------- -matchN :: IdUnfoldingFun - -> InScopeSet -- ^ In-scope variables +matchN :: InScopeEnv -> [Var] -- ^ Match template type variables -> [CoreExpr] -- ^ Match template -> [CoreExpr] -- ^ Target; can have more elements than the template @@ -508,7 +504,7 @@ matchN :: IdUnfoldingFun -- the entire result and what should be substituted for each template variable. -- Fail if there are two few actual arguments from the target to match the template -matchN id_unf in_scope tmpl_vars tmpl_es target_es +matchN (in_scope, id_unf) tmpl_vars tmpl_es target_es = do { subst <- go init_menv emptyRuleSubst tmpl_es target_es ; return (rs_binds subst, map (lookup_tmpl subst) tmpl_vars') } @@ -572,14 +568,15 @@ necessary; the renamed ones are the tmpl_vars' -- * The BindWrapper in a RuleSubst are the bindings floated out -- from nested matches; see the Let case of match, below -- -data RuleEnv = RV { rv_tmpls :: VarSet -- Template variables - , rv_lcl :: RnEnv2 -- Renamings for *local bindings* - -- (lambda/case) - , rv_fltR :: Subst -- Renamings for floated let-bindings - -- domain disjoint from envR of rv_lcl - -- See Note [Matching lets] - , rv_unf :: IdUnfoldingFun - } +data RuleMatchEnv + = RV { rv_tmpls :: VarSet -- Template variables + , rv_lcl :: RnEnv2 -- Renamings for *local bindings* + -- (lambda/case) + , rv_fltR :: Subst -- Renamings for floated let-bindings + -- domain disjoint from envR of rv_lcl + -- See Note [Matching lets] + , rv_unf :: IdUnfoldingFun + } data RuleSubst = RS { rs_tv_subst :: TvSubstEnv -- Range is the , rs_id_subst :: IdSubstEnv -- template variables @@ -604,7 +601,7 @@ emptyRuleSubst = RS { rs_tv_subst = emptyVarEnv, rs_id_subst = emptyVarEnv -- SLPJ July 99 -match :: RuleEnv +match :: RuleMatchEnv -> RuleSubst -> CoreExpr -- Template -> CoreExpr -- Target @@ -720,7 +717,7 @@ match _ _ _e1 _e2 = -- pprTrace "Failing at" ((text "e1:" <+> ppr _e1) $$ (text Nothing ------------- -match_co :: RuleEnv +match_co :: RuleMatchEnv -> RuleSubst -> Coercion -> Coercion @@ -736,7 +733,7 @@ match_co _ _ co1 _ -- Currently just deals with CoVarCo and Refl ------------- -rnMatchBndr2 :: RuleEnv -> RuleSubst -> Var -> Var -> RuleEnv +rnMatchBndr2 :: RuleMatchEnv -> RuleSubst -> Var -> Var -> RuleMatchEnv rnMatchBndr2 renv subst x1 x2 = renv { rv_lcl = rnBndr2 rn_env x1 x2 , rv_fltR = delBndr (rv_fltR renv) x2 } @@ -746,7 +743,7 @@ rnMatchBndr2 renv subst x1 x2 -- there 
are some floated let-bindings ------------------------------------------ -match_alts :: RuleEnv +match_alts :: RuleMatchEnv -> RuleSubst -> [CoreAlt] -- Template -> [CoreAlt] -- Target @@ -772,7 +769,7 @@ okToFloat rn_env bind_fvs not_captured fv = not (inRnEnvR rn_env fv) ------------------------------------------ -match_var :: RuleEnv +match_var :: RuleMatchEnv -> RuleSubst -> Var -- Template -> CoreExpr -- Target @@ -801,7 +798,7 @@ match_var renv@(RV { rv_tmpls = tmpls, rv_lcl = rn_env, rv_fltR = flt_env }) -- template x, so we must rename first! ------------------------------------------ -match_tmpl_var :: RuleEnv +match_tmpl_var :: RuleMatchEnv -> RuleSubst -> Var -- Template -> CoreExpr -- Target @@ -842,7 +839,7 @@ match_tmpl_var renv@(RV { rv_lcl = rn_env, rv_fltR = flt_env }) -- because no free var of e2' is in the rnEnvR of the envt ------------------------------------------ -match_ty :: RuleEnv +match_ty :: RuleMatchEnv -> RuleSubst -> Type -- Template -> Type -- Target @@ -1096,7 +1093,8 @@ ruleAppCheck_help env fn args rules = ptext (sLit "Rule") <+> doubleQuotes (ftext name) rule_info dflags rule - | Just _ <- matchRule dflags fn noBlackList (rc_id_unf env) emptyInScopeSet args rough_args rule + | Just _ <- matchRule dflags (emptyInScopeSet, rc_id_unf env) + noBlackList fn args rough_args rule = text "matches (which is very peculiar!)" rule_info _ (BuiltinRule {}) = text "does not match" diff --git a/compiler/specialise/Specialise.lhs b/compiler/specialise/Specialise.lhs index e6e4c4809297..a161444d7bcb 100644 --- a/compiler/specialise/Specialise.lhs +++ b/compiler/specialise/Specialise.lhs @@ -10,7 +10,7 @@ module Specialise ( specProgram ) where import Id import TcType hiding( substTy, extendTvSubstList ) -import Type( TyVar, isDictTy, mkPiTypes ) +import Type( TyVar, isDictTy, mkPiTypes, classifyPredType, PredTree(..), isIPClass ) import Coercion( Coercion ) import CoreMonad import qualified CoreSubst @@ -1044,7 +1044,8 @@ specCalls env rules_for_me calls_for_me fn rhs ; return (spec_rules, spec_defns, plusUDList spec_uds) } | otherwise -- No calls or RHS doesn't fit our preconceptions - = WARN( notNull calls_for_me, ptext (sLit "Missed specialisation opportunity for") + = WARN( not (exprIsTrivial rhs) && notNull calls_for_me, + ptext (sLit "Missed specialisation opportunity for") <+> ppr fn $$ _trace_doc ) -- Note [Specialisation shape] -- pprTrace "specDefn: none" (ppr fn <+> ppr calls_for_me) $ @@ -1077,8 +1078,9 @@ specCalls env rules_for_me calls_for_me fn rhs already_covered :: DynFlags -> [CoreExpr] -> Bool already_covered dflags args -- Note [Specialisations already covered] - = isJust (lookupRule dflags (const True) realIdUnfolding - (CoreSubst.substInScope (se_subst env)) + = isJust (lookupRule dflags + (CoreSubst.substInScope (se_subst env), realIdUnfolding) + (const True) fn args rules_for_me) mk_ty_args :: [Maybe Type] -> [TyVar] -> [CoreExpr] @@ -1429,6 +1431,18 @@ It's a silly exapmle, but we get where choose doesn't have any dict arguments. Thus far I have not tried to fix this (wait till there's a real example). +Mind you, then 'choose' will be inlined (since RHS is trivial) so +it doesn't matter. This comes up with single-method classes + + class C a where { op :: a -> a } + instance C a => C [a] where .... +==> + $fCList :: C a => C [a] + $fCList = $copList |> (...coercion>...) + ....(uses of $fCList at particular types)... + +So we suppress the WARN if the rhs is trivial. 
+ Note [Inline specialisations] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Here is what we do with the InlinePragma of the original function @@ -1583,7 +1597,9 @@ mkCallUDs :: SpecEnv -> Id -> [CoreExpr] -> UsageDetails mkCallUDs env f args | not (want_calls_for f) -- Imported from elsewhere || null theta -- Not overloaded - || not (all type_determines_value theta) + = emptyUDs + + | not (all type_determines_value theta) || not (spec_tys `lengthIs` n_tyvars) || not ( dicts `lengthIs` n_dicts) || not (any (interestingDict env) dicts) -- Note [Interesting dictionary arguments] @@ -1611,14 +1627,36 @@ mkCallUDs env f args want_calls_for f = isLocalId f || isInlinablePragma (idInlinePragma f) - type_determines_value pred = isClassPred pred && not (isIPPred pred) - -- Only specialise if all overloading is on non-IP *class* params, - -- because these are the ones whose *type* determines their *value*. - -- In ptic, with implicit params, the type args - -- *don't* say what the value of the implicit param is! - -- See Trac #7101 + type_determines_value pred -- See Note [Type determines value] + = case classifyPredType pred of + ClassPred cls _ -> not (isIPClass cls) + TuplePred ps -> all type_determines_value ps + EqPred {} -> True + IrredPred {} -> True -- Things like (D []) where D is a + -- Constraint-ranged family; Trac #7785 \end{code} +Note [Type determines value] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Only specialise if all overloading is on non-IP *class* params, +because these are the ones whose *type* determines their *value*. In +parrticular, with implicit params, the type args *don't* say what the +value of the implicit param is! See Trac #7101 + +However, consider + type family D (v::*->*) :: Constraint + type instance D [] = () + f :: D v => v Char -> Int +If we see a call (f "foo"), we'll pass a "dictionary" + () |> (g :: () ~ D []) +and it's good to specialise f at this dictionary. + +So the question is: can an implicit parameter "hide inside" a +type-family constraint like (D a). Well, no. We don't allow + type instance D Maybe = ?x:Int +Hence the IrredPred case in type_determines_value. +See Trac #7785. + Note [Interesting dictionary arguments] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ Consider this diff --git a/compiler/stgSyn/CoreToStg.lhs b/compiler/stgSyn/CoreToStg.lhs index 1e737f91e906..3febfb3b831e 100644 --- a/compiler/stgSyn/CoreToStg.lhs +++ b/compiler/stgSyn/CoreToStg.lhs @@ -38,6 +38,7 @@ import FastString import Util import DynFlags import ForeignCall +import Demand ( isSingleUsed ) import PrimOp ( PrimCall(..) 
) \end{code} @@ -245,7 +246,7 @@ coreToTopStgRhs dflags this_mod scope_fv_info (bndr, rhs) = do { (new_rhs, rhs_fvs, _) <- coreToStgExpr rhs ; lv_info <- freeVarsToLiveVars rhs_fvs - ; let stg_rhs = mkTopStgRhs dflags this_mod rhs_fvs (mkSRT lv_info) bndr_info new_rhs + ; let stg_rhs = mkTopStgRhs dflags this_mod rhs_fvs (mkSRT lv_info) bndr bndr_info new_rhs stg_arity = stgRhsArity stg_rhs ; return (ASSERT2( arity_ok stg_arity, mk_arity_msg stg_arity) stg_rhs, rhs_fvs) } @@ -272,26 +273,31 @@ coreToTopStgRhs dflags this_mod scope_fv_info (bndr, rhs) ptext (sLit "STG arity:") <+> ppr stg_arity] mkTopStgRhs :: DynFlags -> Module -> FreeVarsInfo - -> SRT -> StgBinderInfo -> StgExpr + -> SRT -> Id -> StgBinderInfo -> StgExpr -> StgRhs -mkTopStgRhs _ _ rhs_fvs srt binder_info (StgLam bndrs body) +mkTopStgRhs _ _ rhs_fvs srt _ binder_info (StgLam bndrs body) = StgRhsClosure noCCS binder_info (getFVs rhs_fvs) ReEntrant srt bndrs body -mkTopStgRhs dflags this_mod _ _ _ (StgConApp con args) +mkTopStgRhs dflags this_mod _ _ _ _ (StgConApp con args) | not (isDllConApp dflags this_mod con args) -- Dynamic StgConApps are updatable = StgRhsCon noCCS con args -mkTopStgRhs _ _ rhs_fvs srt binder_info rhs +mkTopStgRhs _ _ rhs_fvs srt bndr binder_info rhs = StgRhsClosure noCCS binder_info (getFVs rhs_fvs) - Updatable + (getUpdateFlag bndr) srt [] rhs + +getUpdateFlag :: Id -> UpdateFlag +getUpdateFlag bndr + = if isSingleUsed (idDemandInfo bndr) + then SingleEntry else Updatable \end{code} @@ -781,27 +787,27 @@ coreToStgRhs :: FreeVarsInfo -- Free var info for the scope of the bi coreToStgRhs scope_fv_info binders (bndr, rhs) = do (new_rhs, rhs_fvs, rhs_escs) <- coreToStgExpr rhs lv_info <- freeVarsToLiveVars (binders `minusFVBinders` rhs_fvs) - return (mkStgRhs rhs_fvs (mkSRT lv_info) bndr_info new_rhs, + return (mkStgRhs rhs_fvs (mkSRT lv_info) bndr bndr_info new_rhs, rhs_fvs, lv_info, rhs_escs) where bndr_info = lookupFVInfo scope_fv_info bndr -mkStgRhs :: FreeVarsInfo -> SRT -> StgBinderInfo -> StgExpr -> StgRhs +mkStgRhs :: FreeVarsInfo -> SRT -> Id -> StgBinderInfo -> StgExpr -> StgRhs -mkStgRhs _ _ _ (StgConApp con args) = StgRhsCon noCCS con args +mkStgRhs _ _ _ _ (StgConApp con args) = StgRhsCon noCCS con args -mkStgRhs rhs_fvs srt binder_info (StgLam bndrs body) +mkStgRhs rhs_fvs srt _ binder_info (StgLam bndrs body) = StgRhsClosure noCCS binder_info (getFVs rhs_fvs) ReEntrant srt bndrs body -mkStgRhs rhs_fvs srt binder_info rhs +mkStgRhs rhs_fvs srt bndr binder_info rhs = StgRhsClosure noCCS binder_info (getFVs rhs_fvs) upd_flag srt [] rhs where - upd_flag = Updatable + upd_flag = getUpdateFlag bndr {- SDM: disabled. 
Eval/Apply can't handle functions with arity zero very well; and making these into simple non-updatable thunks breaks other @@ -1109,7 +1115,7 @@ plusFVInfo :: (Var, HowBound, StgBinderInfo) -> (Var, HowBound, StgBinderInfo) -> (Var, HowBound, StgBinderInfo) plusFVInfo (id1,hb1,info1) (id2,hb2,info2) - = ASSERT (id1 == id2 && hb1 `check_eq_how_bound` hb2) + = ASSERT(id1 == id2 && hb1 `check_eq_how_bound` hb2) (id1, hb1, combineStgBinderInfo info1 info2) -- The HowBound info for a variable in the FVInfo should be consistent diff --git a/compiler/stranal/DmdAnal.lhs b/compiler/stranal/DmdAnal.lhs index 0eca72fa00b1..572a39d0b7a6 100644 --- a/compiler/stranal/DmdAnal.lhs +++ b/compiler/stranal/DmdAnal.lhs @@ -23,17 +23,16 @@ import VarEnv import BasicTypes import FastString import Data.List -import DataCon ( dataConTyCon, dataConRepStrictness, isMarkedStrict ) +import DataCon import Id -import CoreUtils ( exprIsHNF, exprIsTrivial ) +import CoreUtils ( exprIsHNF, exprType, exprIsTrivial ) import PprCore -import UniqFM ( filterUFM ) import TyCon -import Pair -import Type ( eqType, tyConAppTyCon_maybe ) -import Coercion ( coercionKind ) +import Type ( eqType ) +-- import Pair +-- import Coercion ( coercionKind ) import Util -import Maybes ( orElse ) +import Maybes ( isJust, orElse ) import TysWiredIn ( unboxedPairDataCon ) import TysPrim ( realWorldStatePrimTy ) \end{code} @@ -45,7 +44,6 @@ import TysPrim ( realWorldStatePrimTy ) %************************************************************************ \begin{code} - dmdAnalProgram :: DynFlags -> CoreProgram -> IO CoreProgram dmdAnalProgram dflags binds = do { @@ -54,29 +52,27 @@ dmdAnalProgram dflags binds } where do_prog :: CoreProgram -> CoreProgram - do_prog binds = snd $ mapAccumL (dmdAnalTopBind dflags) emptySigEnv binds + do_prog binds = snd $ mapAccumL dmdAnalTopBind (emptyAnalEnv dflags) binds -- Analyse a (group of) top-level binding(s) -dmdAnalTopBind :: DynFlags - -> SigEnv +dmdAnalTopBind :: AnalEnv -> CoreBind - -> (SigEnv, CoreBind) -dmdAnalTopBind dflags sigs (NonRec id rhs) - = (sigs2, NonRec id2 rhs2) + -> (AnalEnv, CoreBind) +dmdAnalTopBind sigs (NonRec id rhs) + = (extendAnalEnv TopLevel sigs id sig, NonRec id2 rhs2) where - ( _, _, (_, rhs1)) = dmdAnalRhs dflags TopLevel NonRecursive (virgin sigs) (id, rhs) - (sigs2, _, (id2, rhs2)) = dmdAnalRhs dflags TopLevel NonRecursive (nonVirgin sigs) (id, rhs1) + ( _, _, _, rhs1) = dmdAnalRhs TopLevel Nothing sigs id rhs + (sig, _, id2, rhs2) = dmdAnalRhs TopLevel Nothing (nonVirgin sigs) id rhs1 -- Do two passes to improve CPR information -- See comments with ignore_cpr_info in mk_sig_ty -- and with extendSigsWithLam -dmdAnalTopBind dflags sigs (Rec pairs) +dmdAnalTopBind sigs (Rec pairs) = (sigs', Rec pairs') where - (sigs', _, pairs') = dmdFix dflags TopLevel (virgin sigs) pairs + (sigs', _, pairs') = dmdFix TopLevel sigs pairs -- We get two iterations automatically -- c.f. the NonRec case above - \end{code} %************************************************************************ @@ -101,90 +97,78 @@ c) The application rule wouldn't be right either Evaluating (f x) in a L demand does *not* cause evaluation of f in a C(L) demand! -Note [Always analyse in virgin pass] -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Tricky point: make sure that we analyse in the 'virgin' pass. Consider - rec { f acc x True = f (...rec { g y = ...g... }...) - f acc x False = acc } -In the virgin pass for 'f' we'll give 'f' a very strict (bottom) type. 
-That might mean that we analyse the sub-expression containing the -E = "...rec g..." stuff in a bottom demand. Suppose we *didn't analyse* -E, but just retuned botType. - -Then in the *next* (non-virgin) iteration for 'f', we might analyse E -in a weaker demand, and that will trigger doing a fixpoint iteration -for g. But *because it's not the virgin pass* we won't start g's -iteration at bottom. Disaster. (This happened in $sfibToList' of -nofib/spectral/fibheaps.) - -So in the virgin pass we make sure that we do analyse the expression -at least once, to initialise its signatures. - \begin{code} -evalDmdAnal :: DynFlags -> AnalEnv -> CoreExpr -> (DmdType, CoreExpr) --- See Note [Ensure demand is strict] -evalDmdAnal dflags env e - | (res_ty, e') <- dmdAnal dflags env evalDmd e - = (deferType res_ty, e') - -simpleDmdAnal :: DynFlags -> AnalEnv -> DmdType -> CoreExpr -> (DmdType, CoreExpr) -simpleDmdAnal dflags env res_ty e - | ae_virgin env -- See Note [Always analyse in virgin pass] - , (_discarded_res_ty, e') <- dmdAnal dflags env evalDmd e - = (res_ty, e') - | otherwise - = (res_ty, e) - -dmdAnal :: DynFlags -> AnalEnv -> Demand -> CoreExpr -> (DmdType, CoreExpr) -dmdAnal dflags env dmd e - | isBotDmd dmd = simpleDmdAnal dflags env botDmdType e - | isAbsDmd dmd = simpleDmdAnal dflags env topDmdType e - | not (isStrictDmd dmd) = evalDmdAnal dflags env e - -dmdAnal _ _ _ (Lit lit) = (topDmdType, Lit lit) -dmdAnal _ _ _ (Type ty) = (topDmdType, Type ty) -- Doesn't happen, in fact -dmdAnal _ _ _ (Coercion co) = (topDmdType, Coercion co) - -dmdAnal _ env dmd (Var var) +dmdAnalThunk :: AnalEnv + -> Demand -- This one takes a *Demand* + -> CoreExpr -> (DmdType, CoreExpr) +dmdAnalThunk env dmd e + | exprIsTrivial e = dmdAnalStar env dmd e + | otherwise = dmdAnalStar env (oneifyDmd dmd) e + +-- Do not process absent demands +-- Otherwise act like in a normal demand analysis +-- See |-* relation in the companion paper +dmdAnalStar :: AnalEnv + -> Demand -- This one takes a *Demand* + -> CoreExpr -> (DmdType, CoreExpr) +dmdAnalStar env dmd e = toCleanDmd (dmdAnal env) dmd e + +-- Main Demand Analsysis machinery +dmdAnal :: AnalEnv + -> CleanDemand -- The main one takes a *CleanDemand* + -> CoreExpr -> (DmdType, CoreExpr) + +-- The CleanDemand is always strict and not absent +-- See Note [Ensure demand is strict] + +dmdAnal _ _ (Lit lit) = (topDmdType, Lit lit) +dmdAnal _ _ (Type ty) = (topDmdType, Type ty) -- Doesn't happen, in fact +dmdAnal _ _ (Coercion co) = (topDmdType, Coercion co) + +dmdAnal env dmd (Var var) = (dmdTransform env var dmd, Var var) -dmdAnal dflags env dmd (Cast e co) +dmdAnal env dmd (Cast e co) = (dmd_ty, Cast e' co) where - (dmd_ty, e') = dmdAnal dflags env dmd' e + (dmd_ty, e') = dmdAnal env dmd e + +{- ----- I don't get this, so commenting out ------- to_co = pSnd (coercionKind co) dmd' | Just tc <- tyConAppTyCon_maybe to_co - , isRecursiveTyCon tc = evalDmd + , isRecursiveTyCon tc = cleanEvalDmd | otherwise = dmd -- This coerce usually arises from a recursive -- newtype, and we don't want to look inside them -- for exactly the same reason that we don't look -- inside recursive products -- we might not reach -- a fixpoint. 
So revert to a vanilla Eval demand +-} -dmdAnal dflags env dmd (Tick t e) +dmdAnal env dmd (Tick t e) = (dmd_ty, Tick t e') where - (dmd_ty, e') = dmdAnal dflags env dmd e + (dmd_ty, e') = dmdAnal env dmd e -dmdAnal dflags env dmd (App fun (Type ty)) +dmdAnal env dmd (App fun (Type ty)) = (fun_ty, App fun' (Type ty)) where - (fun_ty, fun') = dmdAnal dflags env dmd fun + (fun_ty, fun') = dmdAnal env dmd fun -dmdAnal dflags sigs dmd (App fun (Coercion co)) +dmdAnal sigs dmd (App fun (Coercion co)) = (fun_ty, App fun' (Coercion co)) where - (fun_ty, fun') = dmdAnal dflags sigs dmd fun + (fun_ty, fun') = dmdAnal sigs dmd fun -- Lots of the other code is there to make this -- beautiful, compositional, application rule :-) -dmdAnal dflags env dmd (App fun arg) -- Non-type arguments - = let -- [Type arg handled above] - (fun_ty, fun') = dmdAnal dflags env (mkCallDmd dmd) fun - (arg_ty, arg') = dmdAnal dflags env arg_dmd arg +dmdAnal env dmd (App fun arg) -- Non-type arguments + = let -- [Type arg handled above] + call_dmd = mkCallDmd dmd + (fun_ty, fun') = dmdAnal env call_dmd fun (arg_dmd, res_ty) = splitDmdTy fun_ty + (arg_ty, arg') = dmdAnalThunk env arg_dmd arg in -- pprTrace "dmdAnal:app" (vcat -- [ text "dmd =" <+> ppr dmd @@ -196,37 +180,35 @@ dmdAnal dflags env dmd (App fun arg) -- Non-type arguments -- , text "overall res dmd_ty =" <+> ppr (res_ty `bothDmdType` arg_ty) ]) (res_ty `bothDmdType` arg_ty, App fun' arg') -dmdAnal dflags env dmd (Lam var body) +dmdAnal env dmd (Lam var body) | isTyVar var - = let - (body_ty, body') = dmdAnal dflags env dmd body + = let + (body_ty, body') = dmdAnal env dmd body in (body_ty, Lam var body') - | Just body_dmd <- peelCallDmd dmd -- A call demand: good! - = let - env' = extendSigsWithLam env var - (body_ty, body') = dmdAnal dflags env' body_dmd body - (lam_ty, var') = annotateLamIdBndr dflags env body_ty var - in - (lam_ty, Lam var' body') + | otherwise + = let (body_dmd, defer_me, one_shot) = peelCallDmd dmd + -- body_dmd - a demand to analyze the body + -- one_shot - one-shotness of the lambda + -- hence, cardinality of its free vars - | otherwise -- Not enough demand on the lambda; but do the body - = let -- anyway to annotate it and gather free var info - (body_ty, body') = dmdAnal dflags env evalDmd body - (lam_ty, var') = annotateLamIdBndr dflags env body_ty var + env' = extendSigsWithLam env var + (body_ty, body') = dmdAnal env' body_dmd body + (lam_ty, var') = annotateLamIdBndr env body_ty one_shot var in - (deferType lam_ty, Lam var' body') + (deferAndUse defer_me one_shot lam_ty, Lam var' body') -dmdAnal dflags env dmd (Case scrut case_bndr ty [alt@(DataAlt dc, _, _)]) +dmdAnal env dmd (Case scrut case_bndr ty [alt@(DataAlt dc, _, _)]) -- Only one alternative with a product constructor | let tycon = dataConTyCon dc , isProductTyCon tycon - , not (isRecursiveTyCon tycon) + , Just rec_tc' <- checkRecTc (ae_rec_tc env) tycon = let - env_alt = extendAnalEnv NotTopLevel env case_bndr case_bndr_sig - (alt_ty, alt') = dmdAnalAlt dflags env_alt dmd alt - (alt_ty1, case_bndr') = annotateBndr alt_ty case_bndr + env_w_tc = env { ae_rec_tc = rec_tc' } + env_alt = extendAnalEnv NotTopLevel env_w_tc case_bndr case_bndr_sig + (alt_ty, alt') = dmdAnalAlt env_alt dmd alt + (alt_ty1, case_bndr') = annotateBndr env alt_ty case_bndr (_, bndrs', _) = alt' case_bndr_sig = cprProdSig -- Inside the alternative, the case binder has the CPR property. 
@@ -258,17 +240,16 @@ dmdAnal dflags env dmd (Case scrut case_bndr ty [alt@(DataAlt dc, _, _)]) -- The above code would compute a Keep for x, since y is not Abs, which is silly -- The insight is, of course, that a demand on y is a demand on the -- scrutinee, so we need to `both` it with the scrut demand + + scrut_dmd1 = mkProdDmd [idDemandInfo b | b <- bndrs', isId b] + scrut_dmd2 = strictenDmd (idDemandInfo case_bndr') + scrut_dmd = scrut_dmd1 `bothCleanDmd` scrut_dmd2 - alt_dmd = mkProdDmd [idDemandInfo b | b <- bndrs', isId b] - scrut_dmd = alt_dmd `bothDmd` - idDemandInfo case_bndr' - - (scrut_ty, scrut') = dmdAnal dflags env scrut_dmd scrut + (scrut_ty, scrut') = dmdAnal env scrut_dmd scrut res_ty = alt_ty1 `bothDmdType` scrut_ty in -- pprTrace "dmdAnal:Case1" (vcat [ text "scrut" <+> ppr scrut -- , text "dmd" <+> ppr dmd --- , text "alt_dmd" <+> ppr alt_dmd -- , text "case_bndr_dmd" <+> ppr (idDemandInfo case_bndr') -- , text "scrut_dmd" <+> ppr scrut_dmd -- , text "scrut_ty" <+> ppr scrut_ty @@ -276,26 +257,32 @@ dmdAnal dflags env dmd (Case scrut case_bndr ty [alt@(DataAlt dc, _, _)]) -- , text "res_ty" <+> ppr res_ty ]) $ (res_ty, Case scrut' case_bndr' ty [alt']) -dmdAnal dflags env dmd (Case scrut case_bndr ty alts) +dmdAnal env dmd (Case scrut case_bndr ty alts) = let -- Case expression with multiple alternatives - (alt_tys, alts') = mapAndUnzip (dmdAnalAlt dflags env dmd) alts - (scrut_ty, scrut') = dmdAnal dflags env evalDmd scrut - (alt_ty, case_bndr') = annotateBndr (foldr lubDmdType botDmdType alt_tys) case_bndr - res_ty = alt_ty `bothDmdType` scrut_ty + (alt_tys, alts') = mapAndUnzip (dmdAnalAlt env dmd) alts + (scrut_ty, scrut') = dmdAnal env cleanEvalDmd scrut + (alt_ty, case_bndr') = annotateBndr env (foldr lubDmdType botDmdType alt_tys) case_bndr + res_ty = alt_ty `bothDmdType` scrut_ty in -- pprTrace "dmdAnal:Case2" (vcat [ text "scrut" <+> ppr scrut -- , text "scrut_ty" <+> ppr scrut_ty +-- , text "alt_tys" <+> ppr alt_tys -- , text "alt_ty" <+> ppr alt_ty -- , text "res_ty" <+> ppr res_ty ]) $ (res_ty, Case scrut' case_bndr' ty alts') -dmdAnal dflags env dmd (Let (NonRec id rhs) body) - = let - (sigs', lazy_fv, (id1, rhs')) = dmdAnalRhs dflags NotTopLevel NonRecursive env (id, rhs) - (body_ty, body') = dmdAnal dflags (updSigEnv env sigs') dmd body - (body_ty1, id2) = annotateBndr body_ty id1 - body_ty2 = addLazyFVs body_ty1 lazy_fv - in +dmdAnal env dmd (Let (NonRec id rhs) body) + = (body_ty2, Let (NonRec id2 annotated_rhs) body') + where + (sig, lazy_fv, id1, rhs') = dmdAnalRhs NotTopLevel Nothing env id rhs + (body_ty, body') = dmdAnal (extendAnalEnv NotTopLevel env id sig) dmd body + (body_ty1, id2) = annotateBndr env body_ty id1 + body_ty2 = addLazyFVs body_ty1 lazy_fv + + -- Annotate top-level lambdas at RHS basing on the aggregated demand info + -- See Note [Annotating lambdas at right-hand side] + annotated_rhs = annLamWithShotness (idDemandInfo id2) rhs' + -- If the actual demand is better than the vanilla call -- demand, you might think that we might do better to re-analyse -- the RHS with the stronger demand. @@ -308,32 +295,42 @@ dmdAnal dflags env dmd (Let (NonRec id rhs) body) -- In practice, all the times the actual demand on id2 is more than -- the vanilla call demand seem to be due to (b). So we don't -- bother to re-analyse the RHS. 
- (body_ty2, Let (NonRec id2 rhs') body') -dmdAnal dflags env dmd (Let (Rec pairs) body) +dmdAnal env dmd (Let (Rec pairs) body) = let - bndrs = map fst pairs - (sigs', lazy_fv, pairs') = dmdFix dflags NotTopLevel env pairs - (body_ty, body') = dmdAnal dflags (updSigEnv env sigs') dmd body - body_ty1 = addLazyFVs body_ty lazy_fv - in - sigs' `seq` body_ty `seq` - let - (body_ty2, _) = annotateBndrs body_ty1 bndrs - -- Don't bother to add demand info to recursive - -- binders as annotateBndr does; - -- being recursive, we can't treat them strictly. - -- But we do need to remove the binders from the result demand env + (env', lazy_fv, pairs') = dmdFix NotTopLevel env pairs + (body_ty, body') = dmdAnal env' dmd body + body_ty1 = deleteFVs body_ty (map fst pairs) + body_ty2 = addLazyFVs body_ty1 lazy_fv in + body_ty2 `seq` (body_ty2, Let (Rec pairs') body') +annLamWithShotness :: Demand -> CoreExpr -> CoreExpr +annLamWithShotness d e + | Just u <- cleanUseDmd_maybe d + = go u e + | otherwise = e + where + go u e + | Just (c, u') <- peelUseCall u + , Lam bndr body <- e + = if isTyVar bndr + then Lam bndr (go u body) + else Lam (setOneShotness c bndr) (go u' body) + | otherwise + = e + +setOneShotness :: Count -> Id -> Id +setOneShotness One bndr = setOneShotLambda bndr +setOneShotness Many bndr = bndr -dmdAnalAlt :: DynFlags -> AnalEnv -> Demand -> Alt Var -> (DmdType, Alt Var) -dmdAnalAlt dflags env dmd (con,bndrs,rhs) +dmdAnalAlt :: AnalEnv -> CleanDemand -> Alt Var -> (DmdType, Alt Var) +dmdAnalAlt env dmd (con,bndrs,rhs) = let - (rhs_ty, rhs') = dmdAnal dflags env dmd rhs + (rhs_ty, rhs') = dmdAnal env dmd rhs rhs_ty' = addDataConPatDmds con bndrs rhs_ty - (alt_ty, bndrs') = annotateBndrs rhs_ty' bndrs + (alt_ty, bndrs') = annotateBndrs env rhs_ty' bndrs final_alt_ty | io_hack_reqd = alt_ty `lubDmdType` topDmdType | otherwise = alt_ty @@ -358,7 +355,66 @@ dmdAnalAlt dflags env dmd (con,bndrs,rhs) idType (head bndrs) `eqType` realWorldStatePrimTy in (final_alt_ty, (con, bndrs', rhs')) +\end{code} + Note [Aggregated demand for cardinality] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +We use different strategies for strictness and usage/cardinality to +"unleash" demands captured on free variables by bindings. Let us +consider the example: + +f1 y = let {-# NOINLINE h #-} + h = y + in (h, h) + +We are interested in obtaining cardinality demand U1 on |y|, as it is +used only inside the thunk |h|, which is updated after its first +evaluation, so |y| is entered at most once. Therefore, the demand on |y|, captured and unleashed by usage of +|h|, is U1. However, if we unleash this demand every time |h| is used, +and then sum up the effects, the ultimate demand on |y| will be U1 + +U1 = U. In order to avoid this, we *first* collect the aggregate demand +on |h| in the body of the let-expression, and only then apply the demand +transformer: + +transf[h](U) = {y |-> U1} + +so the resulting demand on |y| is U1. + +The situation is, however, different for strictness, where this +aggregating approach exhibits worse results because of the nature of +the |both| operation for strictness. Consider the example: + +f y c = + let h x = y |seq| x + in case c of + True -> h True + False -> y + +It is clear that |f| is strict in |y|; however, the suggested analysis +will infer from the body of |let| that |h| is used lazily (as it is +used in one branch only), so a lazy demand will be put on its +free variable |y|. Conversely, if the demand on |h| is unleashed right +on the spot, we will get the desired result, namely, that |f| is +strict in |y|.
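To make the cardinality side of this concrete, here is a small self-contained Haskell sketch (a toy model only: the type Use and the functions bothUse and transfH are invented for illustration and are not GHC's real demand types). The thunk's demand transformer turns any non-absent use of |h| into a single entry of |y|, so applying it once to the aggregated demand keeps U1, whereas unleashing it at every use and combining the results loses that information:

    -- Toy usage domain: Abs = unused, U1 = used at most once, U = possibly used many times
    data Use = Abs | U1 | U deriving (Eq, Show)

    -- Combining two uses of the same variable: two single uses make a many-use
    bothUse :: Use -> Use -> Use
    bothUse Abs u   = u
    bothUse u   Abs = u
    bothUse _   _   = U

    -- Demand transformer of the thunk h = y: however often h is used,
    -- y is entered at most once, because the thunk is updated after
    -- its first entry
    transfH :: Use -> Use
    transfH Abs = Abs
    transfH _   = U1

    perUse, aggregated :: Use
    perUse     = transfH U1 `bothUse` transfH U1   -- U:  too pessimistic
    aggregated = transfH (U1 `bothUse` U1)         -- U1: what we want for f1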
+ +Note [Annotating lambdas at right-hand side] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Let us take a look at the following example: + +g f = let x = 100 + h = \y -> f x y + in h 5 + +One can see that |h| is called just once, so the RHS of |h| can +be annotated as a one-shot lambda. This is done by the function +annLamWithShotness *a posteriori*, i.e., based on the aggregated +usage demand on |h| from the body of the |let|-expression, which is C1(U) +in this case. + +In other words, for locally-bound lambdas we can infer +one-shotness. + +\begin{code} addDataConPatDmds :: AltCon -> [Var] -> DmdType -> DmdType -- See Note [Add demands for strict constructors] addDataConPatDmds DEFAULT _ dmd_ty = dmd_ty @@ -366,14 +422,45 @@ addDataConPatDmds (LitAlt _) _ dmd_ty = dmd_ty addDataConPatDmds (DataAlt con) bndrs dmd_ty = foldr add dmd_ty str_bndrs where - add bndr dmd_ty = addVarDmd dmd_ty bndr absDmd + add bndr dmd_ty = addVarDmd dmd_ty bndr seqDmd str_bndrs = [ b | (b,s) <- zipEqual "addDataConPatBndrs" (filter isId bndrs) (dataConRepStrictness con) , isMarkedStrict s ] - \end{code} +Note [Add demands for strict constructors] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Consider this program (due to Roman): + + data X a = X !a + + foo :: X Int -> Int -> Int + foo (X a) n = go 0 + where + go i | i < n = a + go (i+1) + | otherwise = 0 + +We want the worker for 'foo' to look like this: + + $wfoo :: Int# -> Int# -> Int# + +with the first argument unboxed, so that it is not eval'd each time +around the loop (which would otherwise happen, since 'foo' is not +strict in 'a'). It is sound for the wrapper to pass an unboxed arg +because X is strict, so its argument must be evaluated. And if we +*don't* pass an unboxed argument, we can't even repair it by adding a +`seq` thus: + + foo (X a) n = a `seq` go 0 + +because the seq is discarded (very early) since X is strict! + +There is the usual danger of reboxing, which as usual we ignore. But +if X is monomorphic, and has an UNPACK pragma, then this optimisation +is even more important. We don't want the wrapper to rebox an unboxed +argument, and pass an Int to $wfoo!
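For intuition, here is a hand-written sketch of the worker/wrapper split this Note is after. It is not compiler output: the module name, the name wfoo, and the choice of primops are illustrative, and isTrue# assumes GHC 7.8 or later.

    {-# LANGUAGE MagicHash #-}
    module WWSketch where

    import GHC.Exts (Int#, Int(..), isTrue#, (+#), (<#))

    data X a = X !a

    -- Stand-in for $wfoo: the field arrives unboxed, so it is evaluated
    -- once before the loop rather than on every iteration
    wfoo :: Int# -> Int# -> Int#
    wfoo a n = go 0#
      where
        go i | isTrue# (i <# n) = a +# go (i +# 1#)
             | otherwise        = 0#

    -- Stand-in for the wrapper: unboxing the field is sound precisely
    -- because X is strict, so the field of any X Int is already evaluated
    foo :: X Int -> Int -> Int
    foo (X (I# a)) (I# n) = I# (wfoo a n)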
+ %************************************************************************ %* * Demand transformer @@ -383,7 +470,7 @@ addDataConPatDmds (DataAlt con) bndrs dmd_ty \begin{code} dmdTransform :: AnalEnv -- The strictness environment -> Id -- The function - -> Demand -- The demand on the function + -> CleanDemand -- The demand on the function -> DmdType -- The demand type of the function in this context -- Returned DmdEnv includes the demand on -- this function plus demand on its free variables @@ -394,16 +481,19 @@ dmdTransform env var dmd (idArity var) (idStrictness var) dmd | isGlobalId var -- Imported function - = dmdTransformSig (idStrictness var) dmd + = let res = dmdTransformSig (idStrictness var) dmd in +-- pprTrace "dmdTransform" (vcat [ppr var, ppr (idStrictness var), ppr dmd, ppr res]) + res | Just (sig, top_lvl) <- lookupSigEnv env var -- Local letrec bound thing , let fn_ty = dmdTransformSig sig dmd - = if isTopLevel top_lvl - then fn_ty -- Don't record top level things - else addVarDmd fn_ty var dmd + = -- pprTrace "dmdTransform" (vcat [ppr var, ppr dmd, ppr fn_ty]) $ + if isTopLevel top_lvl + then fn_ty -- Don't record top level things + else addVarDmd fn_ty var (mkOnceUsedDmd dmd) | otherwise -- Local non-letrec-bound thing - = unitVarDmd var dmd + = unitVarDmd var (mkOnceUsedDmd dmd) \end{code} %************************************************************************ @@ -415,30 +505,31 @@ dmdTransform env var dmd \begin{code} -- Recursive bindings -dmdFix :: DynFlags - -> TopLevelFlag +dmdFix :: TopLevelFlag -> AnalEnv -- Does not include bindings for this binding -> [(Id,CoreExpr)] - -> (SigEnv, DmdEnv, + -> (AnalEnv, DmdEnv, [(Id,CoreExpr)]) -- Binders annotated with stricness info -dmdFix dflags top_lvl env orig_pairs - = loop 1 initial_env orig_pairs +dmdFix top_lvl env orig_pairs + = (updSigEnv env (sigEnv final_env), lazy_fv, pairs') + -- Return to original virgin state, keeping new signatures where bndrs = map fst orig_pairs initial_env = addInitialSigs top_lvl env bndrs + (final_env, lazy_fv, pairs') = loop 1 initial_env orig_pairs loop :: Int -> AnalEnv -- Already contains the current sigs -> [(Id,CoreExpr)] - -> (SigEnv, DmdEnv, [(Id,CoreExpr)]) + -> (AnalEnv, DmdEnv, [(Id,CoreExpr)]) loop n env pairs = -- pprTrace "dmd loop" (ppr n <+> ppr bndrs $$ ppr env) $ loop' n env pairs loop' n env pairs | found_fixpoint - = (sigs', lazy_fv, pairs') + = (env', lazy_fv, pairs') -- Note: return pairs', not pairs. pairs' is the result of -- processing the RHSs with sigs (= sigs'), whereas pairs -- is the result of processing the RHSs with the *previous* @@ -446,11 +537,12 @@ dmdFix dflags top_lvl env orig_pairs | n >= 10 = pprTrace "dmdFix loop" (ppr n <+> (vcat - [ text "Sigs:" <+> ppr [ (id,lookupVarEnv sigs id, lookupVarEnv sigs' id) + [ text "Sigs:" <+> ppr [ (id,lookupVarEnv (sigEnv env) id, + lookupVarEnv (sigEnv env') id) | (id,_) <- pairs], text "env:" <+> ppr env, text "binds:" <+> pprCoreBinding (Rec pairs)])) - (sigEnv env, lazy_fv, orig_pairs) -- Safe output + (env, lazy_fv, orig_pairs) -- Safe output -- The lazy_fv part is really important! orig_pairs has no strictness -- info, including nothing about free vars. But if we have -- letrec f = ....y..... in ...f... 
@@ -458,21 +550,21 @@ dmdFix dflags top_lvl env orig_pairs -- otherwise y will get recorded as absent altogether | otherwise - = loop (n+1) (nonVirgin sigs') pairs' + = loop (n+1) (nonVirgin env') pairs' where - sigs = sigEnv env - found_fixpoint = all (same_sig sigs sigs') bndrs + found_fixpoint = all (same_sig (sigEnv env) (sigEnv env')) bndrs - ((sigs',lazy_fv), pairs') = mapAccumL my_downRhs (sigs, emptyDmdEnv) pairs + ((env',lazy_fv), pairs') = mapAccumL my_downRhs (env, emptyDmdEnv) pairs -- mapAccumL: Use the new signature to do the next pair -- The occurrence analyser has arranged them in a good order -- so this can significantly reduce the number of iterations needed - my_downRhs (sigs,lazy_fv) (id,rhs) - = ((sigs', lazy_fv'), pair') + my_downRhs (env, lazy_fv) (id,rhs) + = ((env', lazy_fv'), (id', rhs')) where - (sigs', lazy_fv1, pair') = dmdAnalRhs dflags top_lvl Recursive (updSigEnv env sigs) (id,rhs) - lazy_fv' = plusVarEnv_C bothDmd lazy_fv lazy_fv1 + (sig, lazy_fv1, id', rhs') = dmdAnalRhs top_lvl (Just bndrs) env id rhs + lazy_fv' = plusVarEnv_C bothDmd lazy_fv lazy_fv1 + env' = extendAnalEnv top_lvl env id sig same_sig sigs sigs' var = lookup sigs var == lookup sigs' var lookup sigs var = case lookupVarEnv sigs var of @@ -480,26 +572,104 @@ dmdFix dflags top_lvl env orig_pairs Nothing -> pprPanic "dmdFix" (ppr var) -- Non-recursive bindings -dmdAnalRhs :: DynFlags -> TopLevelFlag -> RecFlag - -> AnalEnv -> (Id, CoreExpr) - -> (SigEnv, DmdEnv, (Id, CoreExpr)) +dmdAnalRhs :: TopLevelFlag + -> Maybe [Id] -- Just bs <=> recursive, Nothing <=> non-recursive + -> AnalEnv -> Id -> CoreExpr + -> (StrictSig, DmdEnv, Id, CoreExpr) -- Process the RHS of the binding, add the strictness signature -- to the Id, and augment the environment with the signature as well. -dmdAnalRhs dflags top_lvl rec_flag env (id, rhs) - = (sigs', lazy_fv, (id', rhs')) - where - arity = idArity id -- The idArity should be up to date - -- The simplifier was run just beforehand - (rhs_dmd_ty, rhs') = dmdAnal dflags env (vanillaCall arity) rhs - (lazy_fv, sig_ty) = WARN( arity /= dmdTypeDepth rhs_dmd_ty && not (exprIsTrivial rhs), ppr id ) - -- The RHS can be eta-reduced to just a variable, - -- in which case we should not complain. 
- mkSigTy top_lvl rec_flag env id rhs rhs_dmd_ty - id' = id `setIdStrictness` sig_ty - sigs' = extendSigEnv top_lvl (sigEnv env) id sig_ty +dmdAnalRhs top_lvl rec_flag env id rhs + | Just fn <- unpackTrivial rhs -- See Note [Trivial right-hand sides] + , let fn_str = getStrictness env fn + = (fn_str, emptyDmdEnv, set_idStrictness env id fn_str, rhs) + | otherwise + = (sig_ty, lazy_fv, id', mkLams bndrs' body') + where + (bndrs, body) = collectBinders rhs + env_body = foldl extendSigsWithLam env bndrs + (body_dmd_ty, body') = dmdAnal env_body body_dmd body + (rhs_dmd_ty, bndrs') = annotateLamBndrs env body_dmd_ty bndrs + id' = set_idStrictness env id sig_ty + sig_ty = mkStrictSig (mkDmdType sig_fv rhs_dmds rhs_res') + -- See Note [NOINLINE and strictness] + + -- See Note [Product demands for function body] + body_dmd = case deepSplitProductType_maybe (exprType body) of + Nothing -> cleanEvalDmd + Just (dc, _, _, _) -> cleanEvalProdDmd (dataConRepArity dc) + + DmdType rhs_fv rhs_dmds rhs_res = rhs_dmd_ty + + -- See Note [Lazy and unleashable free variables] + -- See Note [Aggregated demand for cardinality] + rhs_fv1 = case rec_flag of + Just bs -> useEnv (delVarEnvList rhs_fv bs) + Nothing -> rhs_fv + + (lazy_fv, sig_fv) = splitFVs is_thunk rhs_fv1 + + rhs_res' | returnsCPR rhs_res + , discard_cpr_info = topRes + | otherwise = rhs_res + + discard_cpr_info = nested_sum || (is_thunk && not_strict) + nested_sum -- See Note [CPR for sum types ] + = not (isTopLevel top_lvl || returnsCPRProd rhs_res) + + -- See Note [CPR for thunks] + is_thunk = not (exprIsHNF rhs) + not_strict + = isTopLevel top_lvl -- Top level and recursive things don't + || isJust rec_flag -- get their demandInfo set at all + || not (isStrictDmd (idDemandInfo id) || ae_virgin env) + -- See Note [Optimistic CPR in the "virgin" case] + +unpackTrivial :: CoreExpr -> Maybe Id +-- Returns (Just v) if the arg is really equal to v, modulo +-- casts, type applications etc +-- See Note [Trivial right-hand sides] +unpackTrivial (Var v) = Just v +unpackTrivial (Cast e _) = unpackTrivial e +unpackTrivial (Lam v e) | isTyVar v = unpackTrivial e +unpackTrivial (App e a) | isTypeArg a = unpackTrivial e +unpackTrivial _ = Nothing \end{code} +Note [Trivial right-hand sides] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Consider + foo = plusInt |> co +where plusInt is an arity-2 function with known strictness. Clearly +we want plusInt's strictness to propagate to foo! But because it has +no manifest lambdas, it won't do so automatically. So we have a +special case for right-hand sides that are "trivial", namely variables, +casts, type applications, and the like. + +Note [Product demands for function body] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +This example comes from shootout/binary_trees: + + Main.check' = \ b z ds. case z of z' { I# ip -> + case ds_d13s of + Main.Nil -> z' + Main.Node s14k s14l s14m -> + Main.check' (not b) + (Main.check' b + (case b { + False -> I# (-# s14h s14k); + True -> I# (+# s14h s14k) + }) + s14l) + s14m } } } + +Here we *really* want to unbox z, even though it appears to be used boxed in +the Nil case. Partly the Nil case is not a hot path. But more specifically, +the whole function gets the CPR property if we do. + +So for the demand on the body of a RHS we use a product demand if it's +a product type. 
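A smaller example in the same spirit (invented here, not taken from the shootout program) is an ordinary accumulator loop whose result type is a product:

    -- The body has result type Int, a single-constructor (product) type, so it
    -- is analysed under a product demand.  The n <= 0 branch returns the
    -- already-boxed accumulator, much like returning z' in the Nil case above,
    -- yet with a product demand on the body one would still expect the
    -- function to get the CPR property and a worker that returns a bare Int#.
    sumTo :: Int -> Int -> Int
    sumTo acc n
      | n <= 0    = acc
      | otherwise = sumTo (acc + n) (n - 1)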
+ %************************************************************************ %* * \subsection{Strictness signatures and types} @@ -516,19 +686,17 @@ addVarDmd (DmdType fv ds res) var dmd = DmdType (extendVarEnv_C bothDmd fv var dmd) ds res addLazyFVs :: DmdType -> DmdEnv -> DmdType -addLazyFVs (DmdType fv ds res) lazy_fvs - = DmdType both_fv1 ds res - where - both_fv = plusVarEnv_C bothDmd fv lazy_fvs - both_fv1 = modifyEnv (isBotRes res) (`bothDmd` botDmd) lazy_fvs fv both_fv - -- This modifyEnv is vital. Consider +addLazyFVs dmd_ty lazy_fvs + = dmd_ty `bothDmdType` mkDmdType lazy_fvs [] topRes + -- Using bothDmdType (rather than just both'ing the envs) + -- is vital. Consider -- let f = \x -> (x,y) -- in error (f 3) -- Here, y is treated as a lazy-fv of f, but we must `bothDmd` that L -- demand with the bottom coming up from 'error' -- -- I got a loop in the fixpointer without this, due to an interaction - -- with the lazy_fv filtering in mkSigTy. Roughly, it was + -- with the lazy_fv filtering in dmdAnalRhs. Roughly, it was -- letrec f n x -- = letrec g y = x `fatbar` -- letrec h z = z + ...g... @@ -543,13 +711,9 @@ addLazyFVs (DmdType fv ds res) lazy_fvs -- which floats out of the defn for h. Without the modifyEnv, that -- L demand doesn't get both'd with the Bot coming up from the inner -- call to f. So we just get an L demand for x for g. - -- - -- A better way to say this is that the lazy-fv filtering should give the - -- same answer as putting the lazy fv demands in the function's type. - -removeFV :: DmdEnv -> Var -> DmdResult -> (DmdEnv, Demand) -removeFV fv id res = -- pprTrace "rfv" (ppr id <+> ppr dmd $$ ppr fv) +peelFV :: DmdEnv -> Var -> DmdResult -> (DmdEnv, Demand) +peelFV fv id res = -- pprTrace "rfv" (ppr id <+> ppr dmd $$ ppr fv) (fv', dmd) where fv' = fv `delVarEnv` id @@ -570,73 +734,55 @@ possible to safely ignore non-mentioned variables (their joint demand is ). 
\begin{code} -annotateBndr :: DmdType -> Var -> (DmdType, Var) +annotateBndr :: AnalEnv -> DmdType -> Var -> (DmdType, Var) -- The returned env has the var deleted -- The returned var is annotated with demand info -- according to the result demand of the provided demand type -- No effect on the argument demands -annotateBndr dmd_ty@(DmdType fv ds res) var +annotateBndr env dmd_ty@(DmdType fv ds res) var | isTyVar var = (dmd_ty, var) - | otherwise = (DmdType fv' ds res, setIdDemandInfo var dmd) + | otherwise = (DmdType fv' ds res, set_idDemandInfo env var dmd) where - (fv', dmd) = removeFV fv var res + (fv', dmd) = peelFV fv var res -annotateBndrs :: DmdType -> [Var] -> (DmdType, [Var]) -annotateBndrs = mapAccumR annotateBndr +annotateBndrs :: AnalEnv -> DmdType -> [Var] -> (DmdType, [Var]) +annotateBndrs env = mapAccumR (annotateBndr env) + +annotateLamBndrs :: AnalEnv -> DmdType -> [Var] -> (DmdType, [Var]) +annotateLamBndrs env ty bndrs = mapAccumR annotate ty bndrs + where + annotate dmd_ty bndr + | isId bndr = annotateLamIdBndr env dmd_ty Many bndr + | otherwise = (dmd_ty, bndr) -annotateLamIdBndr :: DynFlags - -> AnalEnv +annotateLamIdBndr :: AnalEnv -> DmdType -- Demand type of body + -> Count -- One-shot-ness of the lambda -> Id -- Lambda binder -> (DmdType, -- Demand type of lambda Id) -- and binder annotated with demand -annotateLamIdBndr dflags env (DmdType fv ds res) id +annotateLamIdBndr env _dmd_ty@(DmdType fv ds res) one_shot id -- For lambdas we add the demand to the argument demands -- Only called for Ids = ASSERT( isId id ) - (final_ty, setIdDemandInfo id dmd) + -- pprTrace "annLamBndr" (vcat [ppr id, ppr _dmd_ty]) $ + (final_ty, setOneShotness one_shot (set_idDemandInfo env id dmd)) where -- Watch out! See note [Lambda-bound unfoldings] final_ty = case maybeUnfoldingTemplate (idUnfolding id) of Nothing -> main_ty Just unf -> main_ty `bothDmdType` unf_ty where - (unf_ty, _) = dmdAnal dflags env dmd unf + (unf_ty, _) = dmdAnalStar env dmd unf main_ty = DmdType fv' (dmd:ds) res - (fv', dmd) = removeFV fv id res + (fv', dmd) = peelFV fv id res -mkSigTy :: TopLevelFlag -> RecFlag -> AnalEnv -> Id -> - CoreExpr -> DmdType -> (DmdEnv, StrictSig) -mkSigTy top_lvl rec_flag env id rhs (DmdType fv dmds res) - = (lazy_fv, mkStrictSig dmd_ty) - -- See Note [NOINLINE and strictness] - where - dmd_ty = mkDmdType strict_fv dmds res' - - -- See Note [Lazy and strict free variables] - lazy_fv = filterUFM (not . isStrictDmd) fv - strict_fv = filterUFM isStrictDmd fv - - ignore_cpr_info = not (exprIsHNF rhs || thunk_cpr_ok) - res' | returnsCPR res - , not (isTopLevel top_lvl || returnsCPRProd res) - -- See Note [CPR for sum types ] - || ignore_cpr_info = topRes - | otherwise = res - - -- Is it okay or not to assign CPR - -- (not okay in the first pass) - thunk_cpr_ok -- See Note [CPR for thunks] - | isTopLevel top_lvl = False -- Top level things don't get - -- their demandInfo set at all - | isRec rec_flag = False -- Ditto recursive things - | ae_virgin env = True -- Optimistic, first time round - -- See Note [Optimistic CPR in the "virgin" case] - | isStrictDmd (idDemandInfo id) = True - | otherwise = False +deleteFVs :: DmdType -> [Var] -> DmdType +deleteFVs (DmdType fvs dmds res) bndrs + = DmdType (delVarEnvList fvs bndrs) dmds res \end{code} Note [CPR for sum types] @@ -728,7 +874,6 @@ NB: strictly_demanded is never true of a top-level Id, or of a recursive Id. 
Note [Optimistic CPR in the "virgin" case] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - Demand and strictness info are initialized by top elements. However, this prevents from inferring a CPR property in the first pass of the analyser, so we keep an explicit flag ae_virgin in the AnalEnv @@ -789,10 +934,9 @@ strictness. For example, if you have a function implemented by an error stub, but which has RULES, you may want it not to be eliminated in favour of error! -Note [Lazy and strict free variables] +Note [Lazy and unleasheable free variables] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -We put the strict FVs in the DmdType of the Id, so +We put the strict and once-used FVs in the DmdType of the Id, so that at its call sites we unleash demands on its strict fvs. An example is 'roll' in imaginary/wheel-sieve2 Something like this: @@ -842,9 +986,13 @@ forget that fact, otherwise we might make 'x' absent when it isn't. \begin{code} data AnalEnv - = AE { ae_sigs :: SigEnv - , ae_virgin :: Bool } -- True on first iteration only + = AE { ae_dflags :: DynFlags + , ae_sigs :: SigEnv + , ae_virgin :: Bool -- True on first iteration only -- See Note [Initialising strictness] + , ae_rec_tc :: RecTcChecker + } + -- We use the se_env to tell us whether to -- record info about a variable in the DmdEnv -- We do so if it's a LocalId, but not top-level @@ -860,6 +1008,10 @@ instance Outputable AnalEnv where [ ptext (sLit "ae_virgin =") <+> ppr virgin , ptext (sLit "ae_sigs =") <+> ppr env ]) +emptyAnalEnv :: DynFlags -> AnalEnv +emptyAnalEnv dflags = AE { ae_dflags = dflags, ae_sigs = emptySigEnv + , ae_virgin = True, ae_rec_tc = initRecTc } + emptySigEnv :: SigEnv emptySigEnv = emptyVarEnv @@ -879,6 +1031,12 @@ extendSigEnv top_lvl sigs var sig = extendVarEnv sigs var (sig, top_lvl) lookupSigEnv :: AnalEnv -> Id -> Maybe (StrictSig, TopLevelFlag) lookupSigEnv env id = lookupVarEnv (ae_sigs env) id +getStrictness :: AnalEnv -> Id -> StrictSig +getStrictness env fn + | isGlobalId fn = idStrictness fn + | Just (sig, _) <- lookupSigEnv env fn = sig + | otherwise = topSig + addInitialSigs :: TopLevelFlag -> AnalEnv -> [Id] -> AnalEnv -- See Note [Initialising strictness] addInitialSigs top_lvl env@(AE { ae_sigs = sigs, ae_virgin = virgin }) ids @@ -888,22 +1046,29 @@ addInitialSigs top_lvl env@(AE { ae_sigs = sigs, ae_virgin = virgin }) ids init_sig | virgin = \_ -> botSig | otherwise = idStrictness -virgin, nonVirgin :: SigEnv -> AnalEnv -virgin sigs = AE { ae_sigs = sigs, ae_virgin = True } -nonVirgin sigs = AE { ae_sigs = sigs, ae_virgin = False } +nonVirgin :: AnalEnv -> AnalEnv +nonVirgin env = env { ae_virgin = False } extendSigsWithLam :: AnalEnv -> Id -> AnalEnv -- Extend the AnalEnv when we meet a lambda binder extendSigsWithLam env id - | isStrictDmd dmd_info || ae_virgin env + | isId id + , isStrictDmd (idDemandInfo id) || ae_virgin env -- See Note [Optimistic CPR in the "virgin" case] -- See Note [Initial CPR for strict binders] , Just {} <- deepSplitProductType_maybe $ idType id = extendAnalEnv NotTopLevel env id cprProdSig - | otherwise = env - where - dmd_info = idDemandInfo id + | otherwise + = env + +set_idDemandInfo :: AnalEnv -> Id -> Demand -> Id +set_idDemandInfo env id dmd + = setIdDemandInfo id (zapDemand (ae_dflags env) dmd) + +set_idStrictness :: AnalEnv -> Id -> StrictSig -> Id +set_idStrictness env id sig + = setIdStrictness id (zapStrictSig (ae_dflags env) sig) \end{code} Note [Initial CPR for strict binders] diff --git a/compiler/typecheck/FamInst.lhs b/compiler/typecheck/FamInst.lhs 
index fecedd728d7d..0a567af16afc 100644 --- a/compiler/typecheck/FamInst.lhs +++ b/compiler/typecheck/FamInst.lhs @@ -11,7 +11,7 @@ The @FamInst@ type: family instance heads module FamInst ( checkFamInstConsistency, tcExtendLocalFamInstEnv, - tcLookupFamInst, tcLookupDataFamInst, + tcLookupFamInst, tcGetFamInstEnvs, newFamInst ) where @@ -224,53 +224,8 @@ tcLookupFamInst tycon tys (match:_) -> return $ Just match } - -tcLookupDataFamInst :: TyCon -> [Type] -> TcM (TyCon, [Type]) --- Find the instance of a data family --- Note [Looking up family instances for deriving] -tcLookupDataFamInst tycon tys - | not (isOpenFamilyTyCon tycon) - = return (tycon, tys) - | otherwise - = ASSERT( isAlgTyCon tycon ) - do { maybeFamInst <- tcLookupFamInst tycon tys - ; case maybeFamInst of - Nothing -> famInstNotFound tycon tys - Just (FamInstMatch { fim_instance = famInst - , fim_tys = tys }) - -> let tycon' = dataFamInstRepTyCon famInst - in return (tycon', tys) } - -famInstNotFound :: TyCon -> [Type] -> TcM a -famInstNotFound tycon tys - = failWithTc (ptext (sLit "No family instance for") - <+> quotes (pprTypeApp tycon tys)) \end{code} -Note [Looking up family instances for deriving] -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -tcLookupFamInstExact is an auxiliary lookup wrapper which requires -that looked-up family instances exist. If called with a vanilla -tycon, the old type application is simply returned. - -If we have - data instance F () = ... deriving Eq - data instance F () = ... deriving Eq -then tcLookupFamInstExact will be confused by the two matches; -but that can't happen because tcInstDecls1 doesn't call tcDeriving -if there are any overlaps. - -There are two other things that might go wrong with the lookup. -First, we might see a standalone deriving clause - deriving Eq (F ()) -when there is no data instance F () in scope. - -Note that it's OK to have - data instance F [a] = ... - deriving Eq (F [(a,b)]) -where the match is not exact; the same holds for ordinary data types -with standalone deriving declrations. - %************************************************************************ %* * diff --git a/compiler/typecheck/TcCanonical.lhs b/compiler/typecheck/TcCanonical.lhs index db1c5a0a5500..c0c02fbcc06b 100644 --- a/compiler/typecheck/TcCanonical.lhs +++ b/compiler/typecheck/TcCanonical.lhs @@ -27,7 +27,7 @@ import OccName( OccName ) import Outputable import Control.Monad ( when ) import TysWiredIn ( eqTyCon ) - +import DynFlags( DynFlags ) import VarSet import TcSMonad import FastString @@ -180,7 +180,7 @@ canonicalize (CTyEqCan { cc_loc = d , cc_tyvar = tv , cc_rhs = xi }) = {-# SCC "canEqLeafTyVarEq" #-} - canEqLeafTyVarEq d ev tv xi + canEqLeafTyVar d ev tv xi canonicalize (CFunEqCan { cc_loc = d , cc_ev = ev @@ -188,7 +188,7 @@ canonicalize (CFunEqCan { cc_loc = d , cc_tyargs = xis1 , cc_rhs = xi2 }) = {-# SCC "canEqLeafFunEq" #-} - canEqLeafFunEq d ev fn xis1 xi2 + canEqLeafFun d ev fn xis1 xi2 canonicalize (CIrredEvCan { cc_ev = ev , cc_loc = d }) @@ -544,20 +544,20 @@ flatten loc f ctxt (TyConApp tc tys) -- cache as well when we interact an equality with the inert. -- The design choice is: do we keep the flat cache rewritten or not? -- For now I say we don't keep it fully rewritten. 
- do { traceTcS "flatten/flat-cache hit" $ ppr ctev - ; (rhs_xi,co) <- flatten loc f flav rhs_ty + do { (rhs_xi,co) <- flatten loc f flav rhs_ty ; let final_co = evTermCoercion (ctEvTerm ctev) `mkTcTransCo` mkTcSymCo co + ; traceTcS "flatten/flat-cache hit" $ (ppr ctev $$ ppr rhs_xi $$ ppr final_co) ; return (final_co, rhs_xi) } - _ -> do { traceTcS "flatten/flat-cache miss" $ ppr fam_ty - ; (ctev, rhs_xi) <- newFlattenSkolem ctxt fam_ty + _ -> do { (ctev, rhs_xi) <- newFlattenSkolem ctxt fam_ty ; let ct = CFunEqCan { cc_ev = ctev , cc_fun = tc , cc_tyargs = xi_args , cc_rhs = rhs_xi , cc_loc = loc } ; updWorkListTcS $ extendWorkListFunEq ct + ; traceTcS "flatten/flat-cache miss" $ (ppr fam_ty $$ ppr rhs_xi $$ ppr ctev) ; return (evTermCoercion (ctEvTerm ctev), rhs_xi) } } -- Emit the flat constraints @@ -609,13 +609,15 @@ flattenTyVar loc f ctxt tv | otherwise = do { mb_ty <- isFilledMetaTyVar_maybe tv ; case mb_ty of { - Just ty -> flatten loc f ctxt ty ; + Just ty -> do { traceTcS "Following filled tyvar" (ppr tv <+> equals <+> ppr ty) + ; flatten loc f ctxt ty } ; Nothing -> -- Try in ty_binds do { ty_binds <- getTcSTyBindsMap ; case lookupVarEnv ty_binds tv of { - Just (_tv,ty) -> flatten loc f ctxt ty ; + Just (_tv,ty) -> do { traceTcS "Following bound tyvar" (ppr tv <+> equals <+> ppr ty) + ; flatten loc f ctxt ty } ; -- NB: ty_binds coercions are all ReflCo, -- so no need to transitively compose co' with another coercion, -- unlike in 'flatten_from_inerts' @@ -626,7 +628,8 @@ flattenTyVar loc f ctxt tv ; let mco = tv_eq_subst ieqs tv -- co : v ~ ty ; case mco of { Just (co,ty) -> - do { (ty_final,co') <- flatten loc f ctxt ty + do { traceTcS "Following inert tyvar" (ppr tv <+> equals <+> ppr ty) + ; (ty_final,co') <- flatten loc f ctxt ty ; return (ty_final, co' `mkTcTransCo` mkTcSymCo co) } ; -- NB recursive call. -- Why? Because inert subst. 
non-idempotent, Note [Detailed InertCans Invariants] @@ -1015,9 +1018,14 @@ reOrient (FunCls {}) _ = False -- Fun/Other on rhs reOrient (VarCls {}) (FunCls {}) = True reOrient (VarCls {}) (OtherCls {}) = False reOrient (VarCls tv1) (VarCls tv2) - | isMetaTyVar tv2 && not (isMetaTyVar tv1) = True - | isFlatSkolTyVar tv2 && not (isFlatSkolTyVar tv1) = True -- Note [Eliminate flat-skols] - | otherwise = False + | not (k2 `isSubKind` k1), k1 `isSubKind` k2 = True -- Note [Kind orientation for CTyEqCan] + -- in TcRnTypes + | not (isMetaTyVar tv1), isMetaTyVar tv2 = True + | not (isFlatSkolTyVar tv1), isFlatSkolTyVar tv2 = True -- Note [Eliminate flat-skols] + | otherwise = False + where + k1 = tyVarKind tv1 + k2 = tyVarKind tv2 -- Just for efficiency, see CTyEqCan invariants ------------------ @@ -1060,18 +1068,14 @@ canEqLeaf loc ev s1 s2 canEqLeafOriented :: CtLoc -> CtEvidence -> TypeClassifier -> TcType -> TcS StopOrContinue -- By now s1 will either be a variable or a type family application --- Precondition: the LHS and RHS have `compatKind` kinds --- so we can safely generate a CTyEqCan or CFunEqCan -canEqLeafOriented loc ev (FunCls fn tys1) s2 = canEqLeafFunEq loc ev fn tys1 s2 -canEqLeafOriented loc ev (VarCls tv) s2 = canEqLeafTyVarEq loc ev tv s2 +canEqLeafOriented loc ev (FunCls fn tys1) s2 = canEqLeafFun loc ev fn tys1 s2 +canEqLeafOriented loc ev (VarCls tv) s2 = canEqLeafTyVar loc ev tv s2 canEqLeafOriented _ ev (OtherCls {}) _ = pprPanic "canEqLeafOriented" (ppr (ctEvPred ev)) -canEqLeafFunEq :: CtLoc -> CtEvidence - -> TyCon -> [TcType] -> TcType -> TcS StopOrContinue --- Precondition: LHS and RHS have compatible kinds --- (guaranteed by canEqLeaf0 -canEqLeafFunEq loc ev fn tys1 ty2 -- ev :: F tys1 ~ ty2 - = do { traceTcS "canEqLeafFunEq" $ pprEq (mkTyConApp fn tys1) ty2 +canEqLeafFun :: CtLoc -> CtEvidence + -> TyCon -> [TcType] -> TcType -> TcS StopOrContinue +canEqLeafFun loc ev fn tys1 ty2 -- ev :: F tys1 ~ ty2 + = do { traceTcS "canEqLeafFun" $ pprEq (mkTyConApp fn tys1) ty2 ; let flav = ctEvFlavour ev -- Flatten type function arguments @@ -1085,95 +1089,102 @@ canEqLeafFunEq loc ev fn tys1 ty2 -- ev :: F tys1 ~ ty2 ; let fam_head = mkTyConApp fn xis1 xco = mkHdEqPred ty2 (mkTcTyConAppCo fn cos1) co2 -- xco :: (F xis1 ~ xi2) ~ (F tys1 ~ ty2) - - ; checkKind loc ev fam_head xi2 xco $ \new_ev -> - continueWith (CFunEqCan { cc_ev = new_ev, cc_loc = loc - , cc_fun = fn, cc_tyargs = xis1, cc_rhs = xi2 }) } - -canEqLeafTyVarEq :: CtLoc -> CtEvidence - -> TcTyVar -> TcType -> TcS StopOrContinue --- Precondition: LHS and RHS have compatible kinds --- (guaranteed by canEqLeaf0 -canEqLeafTyVarEq loc ev tv s2 -- ev :: tv ~ s2 - = do { traceTcS "canEqLeafTyVarEq" $ pprEq (mkTyVarTy tv) s2 + + ; mb <- rewriteCtFlavor ev (mkTcEqPred fam_head xi2) xco + ; case mb of + Nothing -> return Stop + Just new_ev | typeKind fam_head `isSubKind` typeKind xi2 + -- Establish CFunEqCan kind invariant + -> continueWith (CFunEqCan { cc_ev = new_ev, cc_loc = loc + , cc_fun = fn, cc_tyargs = xis1, cc_rhs = xi2 }) + | otherwise + -> checkKind loc new_ev fam_head xi2 } + +canEqLeafTyVar :: CtLoc -> CtEvidence + -> TcTyVar -> TcType -> TcS StopOrContinue +canEqLeafTyVar loc ev tv s2 -- ev :: tv ~ s2 + = do { traceTcS "canEqLeafTyVar 1" $ pprEq (mkTyVarTy tv) s2 ; let flav = ctEvFlavour ev ; (xi1,co1) <- flattenTyVar loc FMFullFlatten flav tv -- co1 :: xi1 ~ tv ; (xi2,co2) <- flatten loc FMFullFlatten flav s2 -- co2 :: xi2 ~ s2 ; let co = mkHdEqPred s2 co1 co2 -- co :: (xi1 ~ xi2) ~ (tv ~ s2) - ; traceTcS 
"canEqLeafTyVarEq2" $ vcat [ppr xi1, ppr xi1] - ; case (getTyVar_maybe xi1, getTyVar_maybe xi2) of { + ; traceTcS "canEqLeafTyVar 2" $ vcat [ppr xi1, ppr xi2] + ; case (getTyVar_maybe xi1, getTyVar_maybe xi2) of (Nothing, _) -> -- Rewriting the LHS did not yield a type variable -- so go around again to canEq do { mb <- rewriteCtFlavor ev (mkTcEqPred xi1 xi2) co ; case mb of Nothing -> return Stop - Just new_ev -> canEqNC loc new_ev xi1 xi2 } ; + Just new_ev -> canEqNC loc new_ev xi1 xi2 } (Just tv1, Just tv2) | tv1 == tv2 -> do { when (isWanted ev) $ setEvBind (ctev_evar ev) (mkEvCast (EvCoercion (mkTcReflCo xi1)) co) - ; return Stop } ; - - (Just tv1, _) -> - - -- LHS rewrote to a type variable, RHS to something else - do { dflags <- getDynFlags - ; case occurCheckExpand dflags tv1 xi2 of - OC_OK xi2' -> -- No occurs check, so we can continue; but make sure - -- that the new goal has enough type synonyms expanded by - -- by the occurCheckExpand - checkKind loc ev xi1 xi2' co $ \new_ev -> - continueWith (CTyEqCan { cc_ev = new_ev, cc_loc = loc - , cc_tyvar = tv1, cc_rhs = xi2' }) - - _bad -> -- Occurs check error - do { mb <- rewriteCtFlavor ev (mkTcEqPred xi1 xi2) co - ; case mb of - Nothing -> return Stop - Just new_ev -> canEqFailure loc new_ev xi1 xi2 } - } } } + ; return Stop } + + (Just tv1, _) -> do { dflags <- getDynFlags + ; canEqLeafTyVar2 dflags loc ev tv1 xi2 co } } + +canEqLeafTyVar2 :: DynFlags -> CtLoc -> CtEvidence + -> TyVar -> Type -> TcCoercion + -> TcS StopOrContinue +-- LHS rewrote to a type variable, +-- RHS to something else (possibly a tyvar, but not the *same* tyvar) +canEqLeafTyVar2 dflags loc ev tv1 xi2 co + | OC_OK xi2' <- occurCheckExpand dflags tv1 xi2 -- No occurs check + = do { mb <- rewriteCtFlavor ev (mkTcEqPred xi1 xi2') co + -- Ensure that the new goal has enough type synonyms + -- expanded by the occurCheckExpand; hence using xi2' here + + ; case mb of + Nothing -> return Stop + Just new_ev | typeKind xi2' `isSubKind` tyVarKind tv1 + -- Establish CTyEqCan kind invariant + -- Reorientation has done its best, but the kinds might + -- simply be incompatible + -> continueWith (CTyEqCan { cc_ev = new_ev, cc_loc = loc + , cc_tyvar = tv1, cc_rhs = xi2' }) + | otherwise + -> checkKind loc new_ev xi1 xi2' } + + | otherwise -- Occurs check error + = do { mb <- rewriteCtFlavor ev (mkTcEqPred xi1 xi2) co + ; case mb of + Nothing -> return Stop + Just new_ev -> canEqFailure loc new_ev xi1 xi2 } + where + xi1 = mkTyVarTy tv1 checkKind :: CtLoc -> CtEvidence -- t1~t2 -> TcType -> TcType -- s1~s2, flattened and zonked - -> TcCoercion -- (s1~s2) ~ (t2~t2) - -> (CtEvidence -> TcS StopOrContinue) -- Do this if kinds are OK -> TcS StopOrContinue --- Do the rewrite, test for incompatible kinds, and continue --- --- See Note [Equalities with incompatible kinds] --- If there are incompatible kinds, emit an "irreducible" constraint +-- LHS and RHS have incompatible kinds, so emit an "irreducible" constraint -- CIrredEvCan (NOT CTyEqCan or CFunEqCan) -- for the type equality; and continue with the kind equality constraint. 
-- When the latter is solved, it'll kick out the irreducible equality for -- a second attempt at solving +-- See Note [Equalities with incompatible kinds] -checkKind loc ev s1 s2 co normal_kont - = do { mb <- rewriteCtFlavor ev (mkTcEqPred s1 s2) co - ; case mb of { - Nothing -> return Stop ; - Just new_ev | k1 `compatKind` k2 -> normal_kont new_ev - | otherwise -> - - ASSERT( isKind k1 && isKind k2 ) +checkKind loc new_ev s1 s2 + = ASSERT( isKind k1 && isKind k2 ) do { -- See Note [Equalities with incompatible kinds] - traceTcS "canEqLeaf: incompatible kinds" (vcat [ppr s1 <+> dcolon <+> ppr k1, - ppr s2 <+> dcolon <+> ppr k2]) + traceTcS "canEqLeaf: incompatible kinds" (vcat [ppr k1, ppr k2]) ; updWorkListTcS $ extendWorkListNonEq $ CIrredEvCan { cc_ev = new_ev, cc_loc = loc } ; mw <- newDerived (mkEqPred k1 k2) ; case mw of Nothing -> return Stop - Just kev -> canEqNC kind_co_loc kev k1 k2 } } } - where - k1 = typeKind s1 - k2 = typeKind s2 + Just kev -> canEqNC kind_co_loc kev k1 k2 } -- Always create a Wanted kind equality even if -- you are decomposing a given constraint. -- NB: DV finds this reasonable for now. Maybe we have to revisit. + where + k1 = typeKind s1 + k2 = typeKind s2 kind_co_loc = setCtLocOrigin loc (KindEqOrigin s1 s2 (ctLocOrigin loc)) @@ -1206,8 +1217,10 @@ the fsk. Note [Equalities with incompatible kinds] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ canEqLeaf is about to make a CTyEqCan or CFunEqCan; but both have the -invariant that LHS and RHS have `compatKind` kinds. What if we try -to unify two things with incompatible kinds? +invariant that LHS and RHS satisfy the kind invariants for CTyEqCan, +CFunEqCan. What if we try to unify two things with incompatible +kinds? + eg a ~ b where a::*, b::*->* or a ~ b where a::*, b::k, k is a kind variable @@ -1227,6 +1240,9 @@ NB: it is important that the types s1,s2 are flattened and zonked E.g. it is WRONG to make an irred (a:k1)~(b:k2) if we already have a substitution k1:=k2 +See also Note [Kind orientation for CTyEqCan] and + Note [Kind orientation for CFunEqCan] in TcRnTypes + Note [Type synonyms and canonicalization] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We treat type synonym applications as xi types, that is, they do not diff --git a/compiler/typecheck/TcDeriv.lhs b/compiler/typecheck/TcDeriv.lhs index b6d0b7c4bcde..bd682014f33b 100644 --- a/compiler/typecheck/TcDeriv.lhs +++ b/compiler/typecheck/TcDeriv.lhs @@ -579,6 +579,7 @@ deriveStandalone (L loc (DerivDecl deriv_ty)) , text "cls:" <+> ppr cls , text "tys:" <+> ppr inst_tys ] -- C.f. TcInstDcls.tcLocalInstDecl1 + ; checkTc (not (null inst_tys)) derivingNullaryErr ; let cls_tys = take (length inst_tys - 1) inst_tys inst_ty = last inst_tys @@ -590,8 +591,8 @@ deriveStandalone (L loc (DerivDecl deriv_ty)) (Just theta) } ------------------------------------------------------------------ -deriveTyData :: [TyVar] -> TyCon -> [Type] - -> LHsType Name -- The deriving predicate +deriveTyData :: [TyVar] -> TyCon -> [Type] -- LHS of data or data instance + -> LHsType Name -- The deriving predicate -> TcM EarlyDerivSpec -- The deriving clause of a data or newtype declaration deriveTyData tvs tc tc_args (L loc deriv_pred) @@ -617,14 +618,16 @@ deriveTyData tvs tc tc_args (L loc deriv_pred) -- Given data T a b c = ... 
deriving( C d ), -- we want to drop type variables from T so that (C d (T a)) is well-kinded ; let cls_tyvars = classTyVars cls - kind = tyVarKind (last cls_tyvars) + ; checkTc (not (null cls_tyvars)) derivingNullaryErr + + ; let kind = tyVarKind (last cls_tyvars) (arg_kinds, _) = splitKindFunTys kind n_args_to_drop = length arg_kinds n_args_to_keep = tyConArity tc - n_args_to_drop args_to_drop = drop n_args_to_keep tc_args inst_ty = mkTyConApp tc (take n_args_to_keep tc_args) inst_ty_kind = typeKind inst_ty - dropped_tvs = mkVarSet (mapCatMaybes getTyVar_maybe args_to_drop) + dropped_tvs = tyVarsOfTypes args_to_drop univ_tvs = (mkVarSet tvs `extendVarSetList` deriv_tvs) `minusVarSet` dropped_tvs @@ -635,22 +638,19 @@ deriveTyData tvs tc tc_args (L loc deriv_pred) ; checkTc (n_args_to_keep >= 0 && (inst_ty_kind `eqKind` kind)) (derivingKindErr tc cls cls_tys kind) - ; checkTc (sizeVarSet dropped_tvs == n_args_to_drop && -- (a) - tyVarsOfTypes (inst_ty:cls_tys) `subVarSet` univ_tvs) -- (b) + ; checkTc (all isTyVarTy args_to_drop && -- (a) + sizeVarSet dropped_tvs == n_args_to_drop && -- (b) + tyVarsOfTypes (inst_ty:cls_tys) `subVarSet` univ_tvs) -- (c) (derivingEtaErr cls cls_tys inst_ty) -- Check that + -- (a) The args to drop are all type variables; eg reject: + -- data instance T a Int = .... deriving( Monad ) -- (a) The data type can be eta-reduced; eg reject: -- data instance T a a = ... deriving( Monad ) -- (b) The type class args do not mention any of the dropped type -- variables -- newtype T a s = ... deriving( ST s ) - -- Type families can't be partially applied - -- e.g. newtype instance T Int a = MkT [a] deriving( Monad ) - -- Note [Deriving, type families, and partial applications] - ; checkTc (not (isFamilyTyCon tc) || n_args_to_drop == 0) - (typeFamilyPapErr tc cls cls_tys inst_ty) - ; mkEqnHelp DerivOrigin (varSetElemsKvsFirst univ_tvs) cls cls_tys inst_ty Nothing } } where kindVarsOnly :: [Type] -> [Type] @@ -660,33 +660,6 @@ deriveTyData tvs tc tc_args (L loc deriv_pred) | otherwise = kindVarsOnly ts \end{code} -Note [Deriving, type families, and partial applications] -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -When there are no type families, it's quite easy: - - newtype S a = MkS [a] - -- :CoS :: S ~ [] -- Eta-reduced - - instance Eq [a] => Eq (S a) -- by coercion sym (Eq (:CoS a)) : Eq [a] ~ Eq (S a) - instance Monad [] => Monad S -- by coercion sym (Monad :CoS) : Monad [] ~ Monad S - -When type familes are involved it's trickier: - - data family T a b - newtype instance T Int a = MkT [a] deriving( Eq, Monad ) - -- :RT is the representation type for (T Int a) - -- :CoF:R1T a :: T Int a ~ :RT a -- Not eta reduced - -- :Co:R1T :: :RT ~ [] -- Eta-reduced - - instance Eq [a] => Eq (T Int a) -- easy by coercion - instance Monad [] => Monad (T Int) -- only if we can eta reduce??? - -The "???" bit is that we don't build the :CoF thing in eta-reduced form -Henc the current typeFamilyPapErr, even though the instance makes sense. -After all, we can write it out - instance Monad [] => Monad (T Int) -- only if we can eta reduce??? - return x = MkT [x] - ... etc ... 
\begin{code} mkEqnHelp :: CtOrigin -> [TyVar] -> Class -> [Type] -> Type @@ -703,6 +676,7 @@ mkEqnHelp orig tvs cls cls_tys tc_app mtheta , className cls == typeableClassName || isAlgTyCon tycon -- Avoid functions, primitive types, etc, unless it's Typeable = mk_alg_eqn tycon tc_args + | otherwise = failWithTc (derivingThingErr False cls cls_tys tc_app (ptext (sLit "The last argument of the instance must be a data or newtype application"))) @@ -721,22 +695,30 @@ mkEqnHelp orig tvs cls cls_tys tc_app mtheta -- We checked for errors before, so we don't need to do that again = mkPolyKindedTypeableEqn orig tvs cls cls_tys tycon tc_args mtheta - | isDataFamilyTyCon tycon - , length tc_args /= tyConArity tycon - = bale_out (ptext (sLit "Unsaturated data family application")) - | otherwise - = do { (rep_tc, rep_tc_args) <- tcLookupDataFamInst tycon tc_args + = do { (rep_tc, rep_tc_args) <- lookup_data_fam tycon tc_args -- Be careful to test rep_tc here: in the case of families, -- we want to check the instance tycon, not the family tycon -- For standalone deriving (mtheta /= Nothing), -- check that all the data constructors are in scope. ; rdr_env <- getGlobalRdrEnv - ; let hidden_data_cons = not (isWiredInName (tyConName rep_tc)) && + ; let data_con_names = map dataConName (tyConDataCons rep_tc) + hidden_data_cons = not (isWiredInName (tyConName rep_tc)) && (isAbstractTyCon rep_tc || - any not_in_scope (tyConDataCons rep_tc)) - not_in_scope dc = null (lookupGRE_Name rdr_env (dataConName dc)) + any not_in_scope data_con_names) + not_in_scope dc = null (lookupGRE_Name rdr_env dc) + + -- Make a Qual RdrName that will do for each DataCon + -- so we can report it as used (Trac #7969) + data_con_rdrs = [ mkRdrQual (is_as (is_decl imp_spec)) occ + | dc_name <- data_con_names + , let occ = nameOccName dc_name + gres = lookupGRE_Name rdr_env dc_name + , not (null gres) + , Imported (imp_spec:_) <- [gre_prov (head gres)] ] + + ; addUsedRdrNames data_con_rdrs ; unless (isNothing mtheta || not hidden_data_cons) (bale_out (derivingHiddenErr tycon)) @@ -747,8 +729,85 @@ mkEqnHelp orig tvs cls cls_tys tc_app mtheta else mkNewTypeEqn orig dflags tvs cls cls_tys tycon tc_args rep_tc rep_tc_args mtheta } + + lookup_data_fam :: TyCon -> [Type] -> TcM (TyCon, [Type]) + -- Find the instance of a data family + -- Note [Looking up family instances for deriving] + lookup_data_fam tycon tys + | not (isFamilyTyCon tycon) + = return (tycon, tys) + | otherwise + = ASSERT( isAlgTyCon tycon ) + do { maybeFamInst <- tcLookupFamInst tycon tys + ; case maybeFamInst of + Nothing -> bale_out (ptext (sLit "No family instance for") + <+> quotes (pprTypeApp tycon tys)) + Just (FamInstMatch { fim_instance = famInst + , fim_index = index + , fim_tys = tys }) + -> ASSERT( index == 0 ) + let tycon' = dataFamInstRepTyCon famInst + in return (tycon', tys) } \end{code} +Note [Looking up family instances for deriving] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +tcLookupFamInstExact is an auxiliary lookup wrapper which requires +that looked-up family instances exist. If called with a vanilla +tycon, the old type application is simply returned. + +If we have + data instance F () = ... deriving Eq + data instance F () = ... deriving Eq +then tcLookupFamInstExact will be confused by the two matches; +but that can't happen because tcInstDecls1 doesn't call tcDeriving +if there are any overlaps. + +There are two other things that might go wrong with the lookup. 
+First, we might see a standalone deriving clause + deriving Eq (F ()) +when there is no data instance F () in scope. + +Note that it's OK to have + data instance F [a] = ... + deriving Eq (F [(a,b)]) +where the match is not exact; the same holds for ordinary data types +with standalone deriving declarations. + +Note [Deriving, type families, and partial applications] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +When there are no type families, it's quite easy: + + newtype S a = MkS [a] + -- :CoS :: S ~ [] -- Eta-reduced + + instance Eq [a] => Eq (S a) -- by coercion sym (Eq (:CoS a)) : Eq [a] ~ Eq (S a) + instance Monad [] => Monad S -- by coercion sym (Monad :CoS) : Monad [] ~ Monad S + +When type familes are involved it's trickier: + + data family T a b + newtype instance T Int a = MkT [a] deriving( Eq, Monad ) + -- :RT is the representation type for (T Int a) + -- :Co:RT :: :RT ~ [] -- Eta-reduced! + -- :CoF:RT a :: T Int a ~ :RT a -- Also eta-reduced! + + instance Eq [a] => Eq (T Int a) -- easy by coercion + -- d1 :: Eq [a] + -- d2 :: Eq (T Int a) = d1 |> Eq (sym (:Co:RT a ; :coF:RT a)) + + instance Monad [] => Monad (T Int) -- only if we can eta reduce??? + -- d1 :: Monad [] + -- d2 :: Monad (T Int) = d1 |> Monad (sym (:Co:RT ; :coF:RT)) + +Note the need for the eta-reduced rule axioms. After all, we can +write it out + instance Monad [] => Monad (T Int) -- only if we can eta reduce??? + return x = MkT [x] + ... etc ... + +See Note [Eta reduction for data family axioms] in TcInstDcls. + %************************************************************************ %* * @@ -872,7 +931,7 @@ inferConstraints cls inst_tys rep_tc rep_tc_args = return [] | cls `hasKey` gen1ClassKey -- Gen1 needs Functor - = ASSERT (length rep_tc_tvs > 0) -- See Note [Getting base classes] + = ASSERT(length rep_tc_tvs > 0) -- See Note [Getting base classes] do { functorClass <- tcLookupClass functorClassName ; return (con_arg_constraints functorClass (get_gen1_constrained_tys last_tv)) } @@ -1829,6 +1888,9 @@ genDerivStuff loc fix_env clas name tycon comaux_maybe %************************************************************************ \begin{code} +derivingNullaryErr :: MsgDoc +derivingNullaryErr = ptext (sLit "Cannot derive instances for nullary classes") + derivingKindErr :: TyCon -> Class -> [Type] -> Kind -> MsgDoc derivingKindErr tc cls cls_tys cls_kind = hang (ptext (sLit "Cannot derive well-kinded instance of form") @@ -1842,11 +1904,6 @@ derivingEtaErr cls cls_tys inst_ty nest 2 (ptext (sLit "instance (...) 
=>") <+> pprClassPred cls (cls_tys ++ [inst_ty]))] -typeFamilyPapErr :: TyCon -> Class -> [Type] -> Type -> MsgDoc -typeFamilyPapErr tc cls cls_tys inst_ty - = hang (ptext (sLit "Derived instance") <+> quotes (pprClassPred cls (cls_tys ++ [inst_ty]))) - 2 (ptext (sLit "requires illegal partial application of data type family") <+> ppr tc) - derivingThingErr :: Bool -> Class -> [Type] -> Type -> MsgDoc -> MsgDoc derivingThingErr newtype_deriving clas tys ty why = sep [(hang (ptext (sLit "Can't make a derived instance of")) diff --git a/compiler/typecheck/TcEvidence.lhs b/compiler/typecheck/TcEvidence.lhs index 1d3d619a5f70..eab839a38038 100644 --- a/compiler/typecheck/TcEvidence.lhs +++ b/compiler/typecheck/TcEvidence.lhs @@ -233,7 +233,7 @@ tcCoercionKind co = go co eqVarKind :: EqVar -> Pair Type eqVarKind cv | Just (tc, [_kind,ty1,ty2]) <- tcSplitTyConApp_maybe (varType cv) - = ASSERT (tc `hasKey` eqTyConKey) + = ASSERT(tc `hasKey` eqTyConKey) Pair ty1 ty2 | otherwise = pprPanic "eqVarKind, non coercion variable" (ppr cv <+> dcolon <+> ppr (varType cv)) diff --git a/compiler/typecheck/TcExpr.lhs b/compiler/typecheck/TcExpr.lhs index 13761a5d183e..8615293a17e0 100644 --- a/compiler/typecheck/TcExpr.lhs +++ b/compiler/typecheck/TcExpr.lhs @@ -5,22 +5,15 @@ \section[TcExpr]{Typecheck an expression} \begin{code} -{-# OPTIONS -fno-warn-tabs #-} --- The above warning supression flag is a temporary kludge. --- While working on this module you are encouraged to remove it and --- detab the module (please do the detabbing in a separate patch). See --- http://hackage.haskell.org/trac/ghc/wiki/Commentary/CodingStyle#TabsvsSpaces --- for details - -module TcExpr ( tcPolyExpr, tcPolyExprNC, tcMonoExpr, tcMonoExprNC, - tcInferRho, tcInferRhoNC, +module TcExpr ( tcPolyExpr, tcPolyExprNC, tcMonoExpr, tcMonoExprNC, + tcInferRho, tcInferRhoNC, tcSyntaxOp, tcCheckId, addExprErrCtxt) where - + #include "HsVersions.h" -#ifdef GHCI /* Only if bootstrapped */ -import {-# SOURCE #-} TcSplice( tcSpliceExpr, tcBracket ) +#ifdef GHCI /* Only if bootstrapped */ +import {-# SOURCE #-} TcSplice( tcSpliceExpr, tcBracket ) import qualified DsMeta #endif @@ -68,37 +61,37 @@ import Class(classTyCon) \end{code} %************************************************************************ -%* * +%* * \subsection{Main wrappers} -%* * +%* * %************************************************************************ \begin{code} tcPolyExpr, tcPolyExprNC - :: LHsExpr Name -- Expression to type check - -> TcSigmaType -- Expected type (could be a polytpye) - -> TcM (LHsExpr TcId) -- Generalised expr with expected type + :: LHsExpr Name -- Expression to type check + -> TcSigmaType -- Expected type (could be a polytpye) + -> TcM (LHsExpr TcId) -- Generalised expr with expected type -- tcPolyExpr is a convenient place (frequent but not too frequent) -- place to add context information. -- The NC version does not do so, usually because the caller wants -- to do so himself. 
-tcPolyExpr expr res_ty +tcPolyExpr expr res_ty = addExprErrCtxt expr $ do { traceTc "tcPolyExpr" (ppr res_ty); tcPolyExprNC expr res_ty } tcPolyExprNC expr res_ty = do { traceTc "tcPolyExprNC" (ppr res_ty) ; (gen_fn, expr') <- tcGen GenSigCtxt res_ty $ \ _ rho -> - tcMonoExprNC expr rho + tcMonoExprNC expr rho ; return (mkLHsWrap gen_fn expr') } --------------- -tcMonoExpr, tcMonoExprNC +tcMonoExpr, tcMonoExprNC :: LHsExpr Name -- Expression to type check -> TcRhoType -- Expected type (could be a type variable) - -- Definitely no foralls at the top + -- Definitely no foralls at the top -> TcM (LHsExpr TcId) tcMonoExpr expr res_ty @@ -108,8 +101,8 @@ tcMonoExpr expr res_ty tcMonoExprNC (L loc expr) res_ty = ASSERT( not (isSigmaTy res_ty) ) setSrcSpan loc $ - do { expr' <- tcExpr expr res_ty - ; return (L loc expr') } + do { expr' <- tcExpr expr res_ty + ; return (L loc expr') } --------------- tcInferRho, tcInferRhoNC :: LHsExpr Name -> TcM (LHsExpr TcId, TcRhoType) @@ -118,7 +111,7 @@ tcInferRho, tcInferRhoNC :: LHsExpr Name -> TcM (LHsExpr TcId, TcRhoType) -- f :: Int -> (forall a. a -> a) -> Int -- then we can infer -- f 3 :: (forall a. a -> a) -> Int --- And that in turn is useful +-- And that in turn is useful -- (a) for the function part of any application (see tcApp) -- (b) for the special rule for '$' tcInferRho expr = addErrCtxt (exprCtxt expr) (tcInferRhoNC expr) @@ -129,14 +122,14 @@ tcInferRhoNC (L loc expr) ; return (L loc expr', rho) } tcInfExpr :: HsExpr Name -> TcM (HsExpr TcId, TcRhoType) -tcInfExpr (HsVar f) = tcInferId f -tcInfExpr (HsPar e) = do { (e', ty) <- tcInferRhoNC e +tcInfExpr (HsVar f) = tcInferId f +tcInfExpr (HsPar e) = do { (e', ty) <- tcInferRhoNC e ; return (HsPar e', ty) } -tcInfExpr (HsApp e1 e2) = tcInferApp e1 [e2] +tcInfExpr (HsApp e1 e2) = tcInferApp e1 [e2] tcInfExpr e = tcInfer (tcExpr e) tcHole :: OccName -> TcRhoType -> TcM (HsExpr TcId) -tcHole occ res_ty +tcHole occ res_ty = do { ty <- newFlexiTyVarTy liftedTypeKind ; name <- newSysName occ ; let ev = mkLocalId name ty @@ -148,47 +141,47 @@ tcHole occ res_ty %************************************************************************ -%* * - tcExpr: the main expression typechecker -%* * +%* * + tcExpr: the main expression typechecker +%* * %************************************************************************ \begin{code} tcExpr :: HsExpr Name -> TcRhoType -> TcM (HsExpr TcId) tcExpr e res_ty | debugIsOn && isSigmaTy res_ty -- Sanity check - = pprPanic "tcExpr: sigma" (ppr res_ty $$ ppr e) + = pprPanic "tcExpr: sigma" (ppr res_ty $$ ppr e) tcExpr (HsVar name) res_ty = tcCheckId name res_ty tcExpr (HsApp e1 e2) res_ty = tcApp e1 [e2] res_ty tcExpr (HsLit lit) res_ty = do { let lit_ty = hsLitType lit - ; tcWrapResult (HsLit lit) lit_ty res_ty } + ; tcWrapResult (HsLit lit) lit_ty res_ty } tcExpr (HsPar expr) res_ty = do { expr' <- tcMonoExprNC expr res_ty - ; return (HsPar expr') } + ; return (HsPar expr') } -tcExpr (HsSCC lbl expr) res_ty +tcExpr (HsSCC lbl expr) res_ty = do { expr' <- tcMonoExpr expr res_ty ; return (HsSCC lbl expr') } -tcExpr (HsTickPragma info expr) res_ty +tcExpr (HsTickPragma info expr) res_ty = do { expr' <- tcMonoExpr expr res_ty ; return (HsTickPragma info expr') } tcExpr (HsCoreAnn lbl expr) res_ty - = do { expr' <- tcMonoExpr expr res_ty - ; return (HsCoreAnn lbl expr') } + = do { expr' <- tcMonoExpr expr res_ty + ; return (HsCoreAnn lbl expr') } -tcExpr (HsOverLit lit) res_ty - = do { lit' <- newOverloadedLit (LiteralOrigin lit) lit res_ty - ; return (HsOverLit lit') 
} +tcExpr (HsOverLit lit) res_ty + = do { lit' <- newOverloadedLit (LiteralOrigin lit) lit res_ty + ; return (HsOverLit lit') } tcExpr (NegApp expr neg_expr) res_ty - = do { neg_expr' <- tcSyntaxOp NegateOrigin neg_expr - (mkFunTy res_ty res_ty) - ; expr' <- tcMonoExpr expr res_ty - ; return (NegApp expr' neg_expr') } + = do { neg_expr' <- tcSyntaxOp NegateOrigin neg_expr + (mkFunTy res_ty res_ty) + ; expr' <- tcMonoExpr expr res_ty + ; return (NegApp expr' neg_expr') } tcExpr (HsIPVar x) res_ty = do { let origin = IPOccOrigin x @@ -209,13 +202,13 @@ tcExpr (HsIPVar x) res_ty Nothing -> panic "The dictionary for `IP` is not a newtype?" tcExpr (HsLam match) res_ty - = do { (co_fn, match') <- tcMatchLambda match res_ty - ; return (mkHsWrap co_fn (HsLam match')) } + = do { (co_fn, match') <- tcMatchLambda match res_ty + ; return (mkHsWrap co_fn (HsLam match')) } tcExpr e@(HsLamCase _ matches) res_ty - = do { (co_fn, [arg_ty], body_ty) <- matchExpectedFunTys msg 1 res_ty - ; matches' <- tcMatchesCase match_ctxt arg_ty matches body_ty - ; return $ mkHsWrapCo co_fn $ HsLamCase arg_ty matches' } + = do { (co_fn, [arg_ty], body_ty) <- matchExpectedFunTys msg 1 res_ty + ; matches' <- tcMatchesCase match_ctxt arg_ty matches body_ty + ; return $ mkHsWrapCo co_fn $ HsLamCase arg_ty matches' } where msg = sep [ ptext (sLit "The function") <+> quotes (ppr e) , ptext (sLit "requires")] match_ctxt = MC { mc_what = CaseAlt, mc_body = tcBody } @@ -224,11 +217,11 @@ tcExpr (ExprWithTySig expr sig_ty) res_ty = do { sig_tc_ty <- tcHsSigType ExprSigCtxt sig_ty -- Remember to extend the lexical type-variable environment - ; (gen_fn, expr') + ; (gen_fn, expr') <- tcGen ExprSigCtxt sig_tc_ty $ \ skol_tvs res_ty -> - tcExtendTyVarEnv2 (hsExplicitTvs sig_ty `zip` skol_tvs) $ - -- See Note [More instantiated than scoped] in TcBinds - tcMonoExprNC expr res_ty + tcExtendTyVarEnv2 (hsExplicitTvs sig_ty `zip` skol_tvs) $ + -- See Note [More instantiated than scoped] in TcBinds + tcMonoExprNC expr res_ty ; let inner_expr = ExprWithTySigOut (mkLHsWrap gen_fn expr') sig_ty @@ -237,28 +230,28 @@ tcExpr (ExprWithTySig expr sig_ty) res_ty tcExpr (HsType ty) _ = failWithTc (text "Can't handle type argument:" <+> ppr ty) - -- This is the syntax for type applications that I was planning - -- but there are difficulties (e.g. what order for type args) - -- so it's not enabled yet. - -- Can't eliminate it altogether from the parser, because the - -- same parser parses *patterns*. + -- This is the syntax for type applications that I was planning + -- but there are difficulties (e.g. what order for type args) + -- so it's not enabled yet. + -- Can't eliminate it altogether from the parser, because the + -- same parser parses *patterns*. tcExpr (HsUnboundVar v) res_ty = tcHole (rdrNameOcc v) res_ty \end{code} %************************************************************************ -%* * - Infix operators and sections -%* * +%* * + Infix operators and sections +%* * %************************************************************************ Note [Left sections] ~~~~~~~~~~~~~~~~~~~~ Left sections, like (4 *), are equivalent to - \ x -> (*) 4 x, + \ x -> (*) 4 x, or, if PostfixOperators is enabled, just - (*) 4 + (*) 4 With PostfixOperators we don't actually require the function to take two arguments at all. For example, (x `not`) means (not x); you get postfix operators! Not Haskell 98, but it's less work and kind of @@ -266,14 +259,14 @@ useful. 
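As a concrete sketch of the two readings described in Note [Left sections] (the binding names here are illustrative only, and the snippet is just meant to load in GHCi):

    {-# LANGUAGE PostfixOperators #-}

    -- An ordinary left section: (4 *) is shorthand for \x -> (*) 4 x
    timesFour :: Int -> Int
    timesFour = (4 *)

    -- With PostfixOperators the operator need not take two arguments:
    -- (x `not`) simply means (not x), i.e. a postfix use of 'not'
    negated :: Bool
    negated = (True `not`)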
Note [Typing rule for ($)] ~~~~~~~~~~~~~~~~~~~~~~~~~~ -People write +People write runST $ blah -so much, where +so much, where runST :: (forall s. ST s a) -> a that I have finally given in and written a special type-checking -rule just for saturated appliations of ($). +rule just for saturated appliations of ($). * Infer the type of the first argument - * Decompose it; should be of form (arg2_ty -> res_ty), + * Decompose it; should be of form (arg2_ty -> res_ty), where arg2_ty might be a polytype * Use arg2_ty to typecheck arg2 @@ -282,26 +275,26 @@ Note [Typing rule for seq] We want to allow x `seq` (# p,q #) which suggests this type for seq: - seq :: forall (a:*) (b:??). a -> b -> b, + seq :: forall (a:*) (b:??). a -> b -> b, with (b:??) meaning that be can be instantiated with an unboxed tuple. But that's ill-kinded! Function arguments can't be unboxed tuples. And indeed, you could not expect to do this with a partially-applied 'seq'; it's only going to work when it's fully applied. so it turns -into +into case x of _ -> (# p,q #) For a while I slid by by giving 'seq' an ill-kinded type, but then -the simplifier eta-reduced an application of seq and Lint blew up +the simplifier eta-reduced an application of seq and Lint blew up with a kind error. It seems more uniform to treat 'seq' as it it -was a language construct. +was a language construct. -See Note [seqId magic] in MkId, and +See Note [seqId magic] in MkId, and \begin{code} tcExpr (OpApp arg1 op fix arg2) res_ty | (L loc (HsVar op_name)) <- op - , op_name `hasKey` seqIdKey -- Note [Typing rule for seq] + , op_name `hasKey` seqIdKey -- Note [Typing rule for seq] = do { arg1_ty <- newFlexiTyVarTy liftedTypeKind ; let arg2_ty = res_ty ; arg1' <- tcArg op (arg1, arg1_ty, 1) @@ -311,14 +304,14 @@ tcExpr (OpApp arg1 op fix arg2) res_ty ; return $ OpApp arg1' op' fix arg2' } | (L loc (HsVar op_name)) <- op - , op_name `hasKey` dollarIdKey -- Note [Typing rule for ($)] + , op_name `hasKey` dollarIdKey -- Note [Typing rule for ($)] = do { traceTc "Application rule" (ppr op) ; (arg1', arg1_ty) <- tcInferRho arg1 ; let doc = ptext (sLit "The first argument of ($) takes") ; (co_arg1, [arg2_ty], op_res_ty) <- matchExpectedFunTys doc 1 arg1_ty -- arg1_ty = arg2_ty -> op_res_ty - -- And arg2_ty maybe polymorphic; that's the point + -- And arg2_ty maybe polymorphic; that's the point -- Make sure that the argument and result types have kind '*' -- Eg we do not want to allow (D# $ 4.0#) Trac #5570 @@ -339,8 +332,8 @@ tcExpr (OpApp arg1 op fix arg2) res_ty ; let op' = L loc (HsWrap (mkWpTyApps [a_ty, b_ty]) (HsVar op_id)) ; return $ mkHsWrapCo (co_res) $ OpApp (mkLHsWrapCo (mkTcFunCo co_a co_b) $ - mkLHsWrapCo co_arg1 arg1') - op' fix + mkLHsWrapCo co_arg1 arg1') + op' fix (mkLHsWrapCo co_a arg2') } | otherwise @@ -353,19 +346,19 @@ tcExpr (OpApp arg1 op fix arg2) res_ty OpApp arg1' (mkLHsWrapCo co_fn op') fix arg2' } -- Right sections, equivalent to \ x -> x `op` expr, or --- \ x -> op x expr - +-- \ x -> op x expr + tcExpr (SectionR op arg2) res_ty = do { (op', op_ty) <- tcInferFun op ; (co_fn, [arg1_ty, arg2_ty], op_res_ty) <- unifyOpFunTysWrap op 2 op_ty ; co_res <- unifyType (mkFunTy arg1_ty op_res_ty) res_ty ; arg2' <- tcArg op (arg2, arg2_ty, 2) ; return $ mkHsWrapCo co_res $ - SectionR (mkLHsWrapCo co_fn op') arg2' } + SectionR (mkLHsWrapCo co_fn op') arg2' } tcExpr (SectionL arg1 op) res_ty = do { (op', op_ty) <- tcInferFun op - ; dflags <- getDynFlags -- Note [Left sections] + ; dflags <- getDynFlags -- Note [Left sections] ; let 
n_reqd_args | xopt Opt_PostfixOperators dflags = 1 | otherwise = 2 @@ -381,12 +374,12 @@ tcExpr (ExplicitTuple tup_args boxity) res_ty ; (coi, arg_tys) <- matchExpectedTyConApp tup_tc res_ty ; tup_args1 <- tcTupArgs tup_args arg_tys ; return $ mkHsWrapCo coi (ExplicitTuple tup_args1 boxity) } - + | otherwise = -- The tup_args are a mixture of Present and Missing (for tuple sections) do { let kind = case boxity of { Boxed -> liftedTypeKind ; Unboxed -> openTypeKind } - arity = length tup_args + arity = length tup_args tup_tc = tupleTyCon (boxityNormalTupleSort boxity) arity ; arg_tys <- newFlexiTyVarTys (tyConArity tup_tc) kind @@ -398,13 +391,13 @@ tcExpr (ExplicitTuple tup_args boxity) res_ty -- Handle tuple sections where ; tup_args1 <- tcTupArgs tup_args arg_tys - + ; return $ mkHsWrapCo coi (ExplicitTuple tup_args1 boxity) } -tcExpr (ExplicitList _ witness exprs) res_ty +tcExpr (ExplicitList _ witness exprs) res_ty = case witness of Nothing -> do { (coi, elt_ty) <- matchExpectedListTy res_ty - ; exprs' <- mapM (tc_elt elt_ty) exprs + ; exprs' <- mapM (tc_elt elt_ty) exprs ; return $ mkHsWrapCo coi (ExplicitList elt_ty Nothing exprs') } Just fln -> do { list_ty <- newFlexiTyVarTy liftedTypeKind @@ -412,48 +405,48 @@ tcExpr (ExplicitList _ witness exprs) res_ty ; (coi, elt_ty) <- matchExpectedListTy list_ty ; exprs' <- mapM (tc_elt elt_ty) exprs ; return $ mkHsWrapCo coi (ExplicitList elt_ty (Just fln') exprs') } - where tc_elt elt_ty expr = tcPolyExpr expr elt_ty + where tc_elt elt_ty expr = tcPolyExpr expr elt_ty -tcExpr (ExplicitPArr _ exprs) res_ty -- maybe empty - = do { (coi, elt_ty) <- matchExpectedPArrTy res_ty - ; exprs' <- mapM (tc_elt elt_ty) exprs - ; return $ mkHsWrapCo coi (ExplicitPArr elt_ty exprs') } +tcExpr (ExplicitPArr _ exprs) res_ty -- maybe empty + = do { (coi, elt_ty) <- matchExpectedPArrTy res_ty + ; exprs' <- mapM (tc_elt elt_ty) exprs + ; return $ mkHsWrapCo coi (ExplicitPArr elt_ty exprs') } where tc_elt elt_ty expr = tcPolyExpr expr elt_ty \end{code} %************************************************************************ -%* * - Let, case, if, do -%* * +%* * + Let, case, if, do +%* * %************************************************************************ \begin{code} tcExpr (HsLet binds expr) res_ty - = do { (binds', expr') <- tcLocalBinds binds $ - tcMonoExpr expr res_ty - ; return (HsLet binds' expr') } + = do { (binds', expr') <- tcLocalBinds binds $ + tcMonoExpr expr res_ty + ; return (HsLet binds' expr') } tcExpr (HsCase scrut matches) exp_ty - = do { -- We used to typecheck the case alternatives first. - -- The case patterns tend to give good type info to use - -- when typechecking the scrutinee. For example - -- case (map f) of - -- (x:xs) -> ... - -- will report that map is applied to too few arguments - -- - -- But now, in the GADT world, we need to typecheck the scrutinee - -- first, to get type info that may be refined in the case alternatives - (scrut', scrut_ty) <- tcInferRho scrut - - ; traceTc "HsCase" (ppr scrut_ty) - ; matches' <- tcMatchesCase match_ctxt scrut_ty matches exp_ty - ; return (HsCase scrut' matches') } + = do { -- We used to typecheck the case alternatives first. + -- The case patterns tend to give good type info to use + -- when typechecking the scrutinee. For example + -- case (map f) of + -- (x:xs) -> ... 
+ -- will report that map is applied to too few arguments + -- + -- But now, in the GADT world, we need to typecheck the scrutinee + -- first, to get type info that may be refined in the case alternatives + (scrut', scrut_ty) <- tcInferRho scrut + + ; traceTc "HsCase" (ppr scrut_ty) + ; matches' <- tcMatchesCase match_ctxt scrut_ty matches exp_ty + ; return (HsCase scrut' matches') } where match_ctxt = MC { mc_what = CaseAlt, - mc_body = tcBody } + mc_body = tcBody } -tcExpr (HsIf Nothing pred b1 b2) res_ty -- Ordinary 'if' +tcExpr (HsIf Nothing pred b1 b2) res_ty -- Ordinary 'if' = do { pred' <- tcMonoExpr pred boolTy ; b1' <- tcMonoExpr b1 res_ty ; b2' <- tcMonoExpr b2 res_ty @@ -484,8 +477,8 @@ tcExpr (HsDo do_or_lc stmts _) res_ty = tcDoStmts do_or_lc stmts res_ty tcExpr (HsProc pat cmd) res_ty - = do { (pat', cmd', coi) <- tcProc pat cmd res_ty - ; return $ mkHsWrapCo coi (HsProc pat' cmd') } + = do { (pat', cmd', coi) <- tcProc pat cmd res_ty + ; return $ mkHsWrapCo coi (HsProc pat' cmd') } \end{code} Note [Rebindable syntax for if] @@ -505,27 +498,27 @@ to support expressions like this: %************************************************************************ -%* * - Record construction and update -%* * +%* * + Record construction and update +%* * %************************************************************************ \begin{code} tcExpr (RecordCon (L loc con_name) _ rbinds) res_ty - = do { data_con <- tcLookupDataCon con_name + = do { data_con <- tcLookupDataCon con_name - -- Check for missing fields - ; checkMissingFields data_con rbinds + -- Check for missing fields + ; checkMissingFields data_con rbinds - ; (con_expr, con_tau) <- tcInferId con_name - ; let arity = dataConSourceArity data_con - (arg_tys, actual_res_ty) = tcSplitFunTysN con_tau arity - con_id = dataConWrapId data_con + ; (con_expr, con_tau) <- tcInferId con_name + ; let arity = dataConSourceArity data_con + (arg_tys, actual_res_ty) = tcSplitFunTysN con_tau arity + con_id = dataConWrapId data_con ; co_res <- unifyType actual_res_ty res_ty ; rbinds' <- tcRecordBinds data_con arg_tys rbinds - ; return $ mkHsWrapCo co_res $ - RecordCon (L loc con_id) con_expr rbinds' } + ; return $ mkHsWrapCo co_res $ + RecordCon (L loc con_id) con_expr rbinds' } \end{code} Note [Type of a record update] @@ -533,12 +526,12 @@ Note [Type of a record update] The main complication with RecordUpd is that we need to explicitly handle the *non-updated* fields. Consider: - data T a b c = MkT1 { fa :: a, fb :: (b,c) } - | MkT2 { fa :: a, fb :: (b,c), fc :: c -> c } - | MkT3 { fd :: a } - - upd :: T a b c -> (b',c) -> T a b' c - upd t x = t { fb = x} + data T a b c = MkT1 { fa :: a, fb :: (b,c) } + | MkT2 { fa :: a, fb :: (b,c), fc :: c -> c } + | MkT3 { fd :: a } + + upd :: T a b c -> (b',c) -> T a b' c + upd t x = t { fb = x} The result type should be (T a b' c) not (T a b c), because 'b' *is not* mentioned in a non-updated field @@ -547,10 +540,10 @@ NB that it's not good enough to look at just one constructor; we must look at them all; cf Trac #3219 After all, upd should be equivalent to: - upd t x = case t of - MkT1 p q -> MkT1 p x - MkT2 a b -> MkT2 p b - MkT3 d -> error ... + upd t x = case t of + MkT1 p q -> MkT1 p x + MkT2 a b -> MkT2 p b + MkT3 d -> error ... So we need to give a completely fresh type to the result record, and then constrain it by the fields that are *not* updated ("p" above). @@ -563,17 +556,17 @@ Hence the use of 'relevant_cont'. 
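As a self-contained sketch of Note [Type of a record update], using the T and upd definitions from the note itself (only the 'demo' binding is added for illustration):

    data T a b c = MkT1 { fa :: a, fb :: (b,c) }
                 | MkT2 { fa :: a, fb :: (b,c), fc :: c -> c }
                 | MkT3 { fd :: a }

    upd :: T a b c -> (b',c) -> T a b' c
    upd t x = t { fb = x }

    -- 'b' may change across the update because it occurs only in the
    -- updated field fb; 'a' and 'c' are fixed because they also occur
    -- in fields that are not updated (fa/fd and fc respectively)
    demo :: T Int Bool Char -> T Int String Char
    demo t = upd t ("hello", 'x')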
Note [Implict type sharing] ~~~~~~~~~~~~~~~~~~~~~~~~~~~ We also take into account any "implicit" non-update fields. For example - data T a b where { MkT { f::a } :: T a a; ... } + data T a b where { MkT { f::a } :: T a a; ... } So the "real" type of MkT is: forall ab. (a~b) => a -> T a b Then consider - upd t x = t { f=x } + upd t x = t { f=x } We infer the type - upd :: T a b -> a -> T a b - upd (t::T a b) (x::a) - = case t of { MkT (co:a~b) (_:a) -> MkT co x } + upd :: T a b -> a -> T a b + upd (t::T a b) (x::a) + = case t of { MkT (co:a~b) (_:a) -> MkT co x } We can't give it the more general type - upd :: T a b -> c -> T c b + upd :: T a b -> c -> T c b Note [Criteria for update] ~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -590,7 +583,7 @@ The criterion we use is this: of the data constructor NB: this is not (quite) the same as being a "naughty" record selector -(See Note [Naughty record selectors]) in TcTyClsDecls), at least +(See Note [Naughty record selectors]) in TcTyClsDecls), at least in the case of GADTs. Consider data T a where { MkT :: { f :: a } :: T [a] } Then f is not "naughty" because it has a well-typed record selector. @@ -614,9 +607,9 @@ Suppose r :: T (t1,t2), e :: t3 Then r { x=e } :: T (t3,t1) ---> case r |> co1 of - MkT x y -> MkT e y |> co2 + MkT x y -> MkT e y |> co2 where co1 :: T (t1,t2) ~ :TP t1 t2 - co2 :: :TP t3 t2 ~ T (t3,t2) + co2 :: :TP t3 t2 ~ T (t3,t2) The wrapping with co2 is done by the constructor wrapper for MkT Outgoing invariants @@ -626,111 +619,111 @@ In the outgoing (HsRecordUpd scrut binds cons in_inst_tys out_inst_tys): * cons are the data constructors to be updated * in_inst_tys, out_inst_tys have same length, and instantiate the - *representation* tycon of the data cons. In Note [Data - family example], in_inst_tys = [t1,t2], out_inst_tys = [t3,t2] - + *representation* tycon of the data cons. In Note [Data + family example], in_inst_tys = [t1,t2], out_inst_tys = [t3,t2] + \begin{code} tcExpr (RecordUpd record_expr rbinds _ _ _) res_ty = ASSERT( notNull upd_fld_names ) - do { - -- STEP 0 - -- Check that the field names are really field names - ; sel_ids <- mapM tcLookupField upd_fld_names - -- The renamer has already checked that - -- selectors are all in scope - ; let bad_guys = [ setSrcSpan loc $ addErrTc (notSelector fld_name) - | (fld, sel_id) <- rec_flds rbinds `zip` sel_ids, - not (isRecordSelector sel_id), -- Excludes class ops - let L loc fld_name = hsRecFieldId fld ] - ; unless (null bad_guys) (sequence bad_guys >> failM) - - -- STEP 1 - -- Figure out the tycon and data cons from the first field name - ; let -- It's OK to use the non-tc splitters here (for a selector) - sel_id : _ = sel_ids - (tycon, _) = recordSelectorFieldLabel sel_id -- We've failed already if - data_cons = tyConDataCons tycon -- it's not a field label - -- NB: for a data type family, the tycon is the instance tycon - - relevant_cons = filter is_relevant data_cons - is_relevant con = all (`elem` dataConFieldLabels con) upd_fld_names - -- A constructor is only relevant to this process if - -- it contains *all* the fields that are being updated - -- Other ones will cause a runtime error if they occur - - -- Take apart a representative constructor - con1 = ASSERT( not (null relevant_cons) ) head relevant_cons - (con1_tvs, _, _, _, con1_arg_tys, _) = dataConFullSig con1 - con1_flds = dataConFieldLabels con1 - con1_res_ty = mkFamilyTyConApp tycon (mkTyVarTys con1_tvs) - - -- Step 2 - -- Check that at least one constructor has all the named fields - -- i.e. 
has an empty set of bad fields returned by badFields - ; checkTc (not (null relevant_cons)) (badFieldsUpd rbinds) - - -- STEP 3 Note [Criteria for update] - -- Check that each updated field is polymorphic; that is, its type - -- mentions only the universally-quantified variables of the data con - ; let flds1_w_tys = zipEqual "tcExpr:RecConUpd" con1_flds con1_arg_tys - upd_flds1_w_tys = filter is_updated flds1_w_tys - is_updated (fld,_) = fld `elem` upd_fld_names - - bad_upd_flds = filter bad_fld upd_flds1_w_tys - con1_tv_set = mkVarSet con1_tvs - bad_fld (fld, ty) = fld `elem` upd_fld_names && - not (tyVarsOfType ty `subVarSet` con1_tv_set) - ; checkTc (null bad_upd_flds) (badFieldTypes bad_upd_flds) - - -- STEP 4 Note [Type of a record update] - -- Figure out types for the scrutinee and result - -- Both are of form (T a b c), with fresh type variables, but with - -- common variables where the scrutinee and result must have the same type - -- These are variables that appear in *any* arg of *any* of the - -- relevant constructors *except* in the updated fields - -- - ; let fixed_tvs = getFixedTyVars con1_tvs relevant_cons - is_fixed_tv tv = tv `elemVarSet` fixed_tvs + do { + -- STEP 0 + -- Check that the field names are really field names + ; sel_ids <- mapM tcLookupField upd_fld_names + -- The renamer has already checked that + -- selectors are all in scope + ; let bad_guys = [ setSrcSpan loc $ addErrTc (notSelector fld_name) + | (fld, sel_id) <- rec_flds rbinds `zip` sel_ids, + not (isRecordSelector sel_id), -- Excludes class ops + let L loc fld_name = hsRecFieldId fld ] + ; unless (null bad_guys) (sequence bad_guys >> failM) + + -- STEP 1 + -- Figure out the tycon and data cons from the first field name + ; let -- It's OK to use the non-tc splitters here (for a selector) + sel_id : _ = sel_ids + (tycon, _) = recordSelectorFieldLabel sel_id -- We've failed already if + data_cons = tyConDataCons tycon -- it's not a field label + -- NB: for a data type family, the tycon is the instance tycon + + relevant_cons = filter is_relevant data_cons + is_relevant con = all (`elem` dataConFieldLabels con) upd_fld_names + -- A constructor is only relevant to this process if + -- it contains *all* the fields that are being updated + -- Other ones will cause a runtime error if they occur + + -- Take apart a representative constructor + con1 = ASSERT( not (null relevant_cons) ) head relevant_cons + (con1_tvs, _, _, _, con1_arg_tys, _) = dataConFullSig con1 + con1_flds = dataConFieldLabels con1 + con1_res_ty = mkFamilyTyConApp tycon (mkTyVarTys con1_tvs) + + -- Step 2 + -- Check that at least one constructor has all the named fields + -- i.e. 
has an empty set of bad fields returned by badFields + ; checkTc (not (null relevant_cons)) (badFieldsUpd rbinds) + + -- STEP 3 Note [Criteria for update] + -- Check that each updated field is polymorphic; that is, its type + -- mentions only the universally-quantified variables of the data con + ; let flds1_w_tys = zipEqual "tcExpr:RecConUpd" con1_flds con1_arg_tys + upd_flds1_w_tys = filter is_updated flds1_w_tys + is_updated (fld,_) = fld `elem` upd_fld_names + + bad_upd_flds = filter bad_fld upd_flds1_w_tys + con1_tv_set = mkVarSet con1_tvs + bad_fld (fld, ty) = fld `elem` upd_fld_names && + not (tyVarsOfType ty `subVarSet` con1_tv_set) + ; checkTc (null bad_upd_flds) (badFieldTypes bad_upd_flds) + + -- STEP 4 Note [Type of a record update] + -- Figure out types for the scrutinee and result + -- Both are of form (T a b c), with fresh type variables, but with + -- common variables where the scrutinee and result must have the same type + -- These are variables that appear in *any* arg of *any* of the + -- relevant constructors *except* in the updated fields + -- + ; let fixed_tvs = getFixedTyVars con1_tvs relevant_cons + is_fixed_tv tv = tv `elemVarSet` fixed_tvs mk_inst_ty :: TvSubst -> (TKVar, TcType) -> TcM (TvSubst, TcType) -- Deals with instantiation of kind variables -- c.f. TcMType.tcInstTyVarsX - mk_inst_ty subst (tv, result_inst_ty) - | is_fixed_tv tv -- Same as result type + mk_inst_ty subst (tv, result_inst_ty) + | is_fixed_tv tv -- Same as result type = return (extendTvSubst subst tv result_inst_ty, result_inst_ty) - | otherwise -- Fresh type, of correct kind + | otherwise -- Fresh type, of correct kind = do { new_ty <- newFlexiTyVarTy (TcType.substTy subst (tyVarKind tv)) ; return (extendTvSubst subst tv new_ty, new_ty) } - ; (_, result_inst_tys, result_subst) <- tcInstTyVars con1_tvs + ; (_, result_inst_tys, result_subst) <- tcInstTyVars con1_tvs - ; (scrut_subst, scrut_inst_tys) <- mapAccumLM mk_inst_ty emptyTvSubst - (con1_tvs `zip` result_inst_tys) + ; (scrut_subst, scrut_inst_tys) <- mapAccumLM mk_inst_ty emptyTvSubst + (con1_tvs `zip` result_inst_tys) - ; let rec_res_ty = TcType.substTy result_subst con1_res_ty - scrut_ty = TcType.substTy scrut_subst con1_res_ty - con1_arg_tys' = map (TcType.substTy result_subst) con1_arg_tys + ; let rec_res_ty = TcType.substTy result_subst con1_res_ty + scrut_ty = TcType.substTy scrut_subst con1_res_ty + con1_arg_tys' = map (TcType.substTy result_subst) con1_arg_tys ; co_res <- unifyType rec_res_ty res_ty - -- STEP 5 - -- Typecheck the thing to be updated, and the bindings - ; record_expr' <- tcMonoExpr record_expr scrut_ty - ; rbinds' <- tcRecordBinds con1 con1_arg_tys' rbinds - - -- STEP 6: Deal with the stupid theta - ; let theta' = substTheta scrut_subst (dataConStupidTheta con1) - ; instStupidTheta RecordUpdOrigin theta' - - -- Step 7: make a cast for the scrutinee, in the case that it's from a type family - ; let scrut_co | Just co_con <- tyConFamilyCoercion_maybe tycon - = WpCast (mkTcUnbranchedAxInstCo co_con scrut_inst_tys) - | otherwise - = idHsWrapper - -- Phew! 
+ -- STEP 5 + -- Typecheck the thing to be updated, and the bindings + ; record_expr' <- tcMonoExpr record_expr scrut_ty + ; rbinds' <- tcRecordBinds con1 con1_arg_tys' rbinds + + -- STEP 6: Deal with the stupid theta + ; let theta' = substTheta scrut_subst (dataConStupidTheta con1) + ; instStupidTheta RecordUpdOrigin theta' + + -- Step 7: make a cast for the scrutinee, in the case that it's from a type family + ; let scrut_co | Just co_con <- tyConFamilyCoercion_maybe tycon + = WpCast (mkTcUnbranchedAxInstCo co_con scrut_inst_tys) + | otherwise + = idHsWrapper + -- Phew! ; return $ mkHsWrapCo co_res $ RecordUpd (mkLHsWrap scrut_co record_expr') rbinds' - relevant_cons scrut_inst_tys result_inst_tys } + relevant_cons scrut_inst_tys result_inst_tys } where upd_fld_names = hsRecFields rbinds @@ -738,28 +731,28 @@ tcExpr (RecordUpd record_expr rbinds _ _ _) res_ty -- These tyvars must not change across the updates getFixedTyVars tvs1 cons = mkVarSet [tv1 | con <- cons - , let (tvs, theta, arg_tys, _) = dataConSig con - flds = dataConFieldLabels con - fixed_tvs = exactTyVarsOfTypes fixed_tys - -- fixed_tys: See Note [Type of a record update] - `unionVarSet` tyVarsOfTypes theta - -- Universally-quantified tyvars that - -- appear in any of the *implicit* - -- arguments to the constructor are fixed - -- See Note [Implict type sharing] - - fixed_tys = [ty | (fld,ty) <- zip flds arg_tys + , let (tvs, theta, arg_tys, _) = dataConSig con + flds = dataConFieldLabels con + fixed_tvs = exactTyVarsOfTypes fixed_tys + -- fixed_tys: See Note [Type of a record update] + `unionVarSet` tyVarsOfTypes theta + -- Universally-quantified tyvars that + -- appear in any of the *implicit* + -- arguments to the constructor are fixed + -- See Note [Implict type sharing] + + fixed_tys = [ty | (fld,ty) <- zip flds arg_tys , not (fld `elem` upd_fld_names)] - , (tv1,tv) <- tvs1 `zip` tvs -- Discards existentials in tvs - , tv `elemVarSet` fixed_tvs ] + , (tv1,tv) <- tvs1 `zip` tvs -- Discards existentials in tvs + , tv `elemVarSet` fixed_tvs ] \end{code} %************************************************************************ -%* * - Arithmetic sequences e.g. [a,b..] - and their parallel-array counterparts e.g. [: a,b.. :] - -%* * +%* * + Arithmetic sequences e.g. [a,b..] + and their parallel-array counterparts e.g. [: a,b.. 
:] + +%* * %************************************************************************ \begin{code} @@ -767,27 +760,27 @@ tcExpr (ArithSeq _ witness seq) res_ty = tcArithSeq witness seq res_ty tcExpr (PArrSeq _ seq@(FromTo expr1 expr2)) res_ty - = do { (coi, elt_ty) <- matchExpectedPArrTy res_ty - ; expr1' <- tcPolyExpr expr1 elt_ty - ; expr2' <- tcPolyExpr expr2 elt_ty - ; enumFromToP <- initDsTc $ dsDPHBuiltin enumFromToPVar - ; enum_from_to <- newMethodFromName (PArrSeqOrigin seq) - (idName enumFromToP) elt_ty - ; return $ mkHsWrapCo coi + = do { (coi, elt_ty) <- matchExpectedPArrTy res_ty + ; expr1' <- tcPolyExpr expr1 elt_ty + ; expr2' <- tcPolyExpr expr2 elt_ty + ; enumFromToP <- initDsTc $ dsDPHBuiltin enumFromToPVar + ; enum_from_to <- newMethodFromName (PArrSeqOrigin seq) + (idName enumFromToP) elt_ty + ; return $ mkHsWrapCo coi (PArrSeq enum_from_to (FromTo expr1' expr2')) } tcExpr (PArrSeq _ seq@(FromThenTo expr1 expr2 expr3)) res_ty - = do { (coi, elt_ty) <- matchExpectedPArrTy res_ty - ; expr1' <- tcPolyExpr expr1 elt_ty - ; expr2' <- tcPolyExpr expr2 elt_ty - ; expr3' <- tcPolyExpr expr3 elt_ty - ; enumFromThenToP <- initDsTc $ dsDPHBuiltin enumFromThenToPVar - ; eft <- newMethodFromName (PArrSeqOrigin seq) - (idName enumFromThenToP) elt_ty -- !!!FIXME: chak - ; return $ mkHsWrapCo coi + = do { (coi, elt_ty) <- matchExpectedPArrTy res_ty + ; expr1' <- tcPolyExpr expr1 elt_ty + ; expr2' <- tcPolyExpr expr2 elt_ty + ; expr3' <- tcPolyExpr expr3 elt_ty + ; enumFromThenToP <- initDsTc $ dsDPHBuiltin enumFromThenToPVar + ; eft <- newMethodFromName (PArrSeqOrigin seq) + (idName enumFromThenToP) elt_ty -- !!!FIXME: chak + ; return $ mkHsWrapCo coi (PArrSeq eft (FromThenTo expr1' expr2' expr3')) } -tcExpr (PArrSeq _ _) _ +tcExpr (PArrSeq _ _) _ = panic "TcExpr.tcExpr: Infinite parallel array!" 
-- the parser shouldn't have generated it and the renamer shouldn't have -- let it through @@ -795,14 +788,14 @@ tcExpr (PArrSeq _ _) _ %************************************************************************ -%* * - Template Haskell -%* * +%* * + Template Haskell +%* * %************************************************************************ \begin{code} -#ifdef GHCI /* Only if bootstrapped */ - -- Rename excludes these cases otherwise +#ifdef GHCI /* Only if bootstrapped */ + -- Rename excludes these cases otherwise tcExpr (HsSpliceE splice) res_ty = tcSpliceExpr splice res_ty tcExpr (HsBracket brack) res_ty = tcBracket brack res_ty tcExpr e@(HsQuasiQuoteE _) _ = @@ -812,9 +805,9 @@ tcExpr e@(HsQuasiQuoteE _) _ = %************************************************************************ -%* * - Catch-all -%* * +%* * + Catch-all +%* * %************************************************************************ \begin{code} @@ -824,9 +817,9 @@ tcExpr other _ = pprPanic "tcMonoExpr" (ppr other) %************************************************************************ -%* * - Arithmetic sequences [a..b] etc -%* * +%* * + Arithmetic sequences [a..b] etc +%* * %************************************************************************ \begin{code} @@ -836,24 +829,24 @@ tcArithSeq :: Maybe (SyntaxExpr Name) -> ArithSeqInfo Name -> TcRhoType tcArithSeq witness seq@(From expr) res_ty = do { (coi, elt_ty, wit') <- arithSeqEltType witness res_ty ; expr' <- tcPolyExpr expr elt_ty - ; enum_from <- newMethodFromName (ArithSeqOrigin seq) - enumFromName elt_ty + ; enum_from <- newMethodFromName (ArithSeqOrigin seq) + enumFromName elt_ty ; return $ mkHsWrapCo coi (ArithSeq enum_from wit' (From expr')) } - + tcArithSeq witness seq@(FromThen expr1 expr2) res_ty = do { (coi, elt_ty, wit') <- arithSeqEltType witness res_ty ; expr1' <- tcPolyExpr expr1 elt_ty ; expr2' <- tcPolyExpr expr2 elt_ty - ; enum_from_then <- newMethodFromName (ArithSeqOrigin seq) - enumFromThenName elt_ty + ; enum_from_then <- newMethodFromName (ArithSeqOrigin seq) + enumFromThenName elt_ty ; return $ mkHsWrapCo coi (ArithSeq enum_from_then wit' (FromThen expr1' expr2')) } - + tcArithSeq witness seq@(FromTo expr1 expr2) res_ty = do { (coi, elt_ty, wit') <- arithSeqEltType witness res_ty ; expr1' <- tcPolyExpr expr1 elt_ty ; expr2' <- tcPolyExpr expr2 elt_ty - ; enum_from_to <- newMethodFromName (ArithSeqOrigin seq) - enumFromToName elt_ty + ; enum_from_to <- newMethodFromName (ArithSeqOrigin seq) + enumFromToName elt_ty ; return $ mkHsWrapCo coi (ArithSeq enum_from_to wit' (FromTo expr1' expr2')) } tcArithSeq witness seq@(FromThenTo expr1 expr2 expr3) res_ty @@ -861,12 +854,12 @@ tcArithSeq witness seq@(FromThenTo expr1 expr2 expr3) res_ty ; expr1' <- tcPolyExpr expr1 elt_ty ; expr2' <- tcPolyExpr expr2 elt_ty ; expr3' <- tcPolyExpr expr3 elt_ty - ; eft <- newMethodFromName (ArithSeqOrigin seq) - enumFromThenToName elt_ty + ; eft <- newMethodFromName (ArithSeqOrigin seq) + enumFromThenToName elt_ty ; return $ mkHsWrapCo coi (ArithSeq eft wit' (FromThenTo expr1' expr2' expr3')) } ----------------- -arithSeqEltType :: Maybe (SyntaxExpr Name) -> TcRhoType +arithSeqEltType :: Maybe (SyntaxExpr Name) -> TcRhoType -> TcM (TcCoercion, TcType, Maybe (SyntaxExpr Id)) arithSeqEltType Nothing res_ty = do { (coi, elt_ty) <- matchExpectedListTy res_ty @@ -879,9 +872,9 @@ arithSeqEltType (Just fl) res_ty \end{code} %************************************************************************ -%* * - Applications -%* * +%* * + Applications +%* * 
%************************************************************************ \begin{code} @@ -892,7 +885,7 @@ tcApp (L _ (HsPar e)) args res_ty = tcApp e args res_ty tcApp (L _ (HsApp e1 e2)) args res_ty - = tcApp e1 (e2:args) res_ty -- Accumulate the arguments + = tcApp e1 (e2:args) res_ty -- Accumulate the arguments tcApp (L loc (HsVar fun)) args res_ty | fun `hasKey` tagToEnumKey @@ -904,24 +897,24 @@ tcApp (L loc (HsVar fun)) args res_ty = tcSeq loc fun arg1 arg2 res_ty tcApp fun args res_ty - = do { -- Type-check the function - ; (fun1, fun_tau) <- tcInferFun fun + = do { -- Type-check the function + ; (fun1, fun_tau) <- tcInferFun fun - -- Extract its argument types - ; (co_fun, expected_arg_tys, actual_res_ty) - <- matchExpectedFunTys (mk_app_msg fun) (length args) fun_tau + -- Extract its argument types + ; (co_fun, expected_arg_tys, actual_res_ty) + <- matchExpectedFunTys (mk_app_msg fun) (length args) fun_tau - -- Typecheck the result, thereby propagating + -- Typecheck the result, thereby propagating -- info (if any) from result into the argument types -- Both actual_res_ty and res_ty are deeply skolemised ; co_res <- addErrCtxtM (funResCtxt True (unLoc fun) actual_res_ty res_ty) $ unifyType actual_res_ty res_ty - -- Typecheck the arguments - ; args1 <- tcArgs fun args expected_arg_tys + -- Typecheck the arguments + ; args1 <- tcArgs fun args expected_arg_tys -- Assemble the result - ; let fun2 = mkLHsWrapCo co_fun fun1 + ; let fun2 = mkLHsWrapCo co_fun fun1 app = mkLHsWrapCo co_res (foldl mkHsApp fun2 args1) ; return (unLoc app) } @@ -940,60 +933,60 @@ tcInferApp (L _ (HsApp e1 e2)) args = tcInferApp e1 (e2:args) tcInferApp fun args = -- Very like the tcApp version, except that there is -- no expected result type passed in - do { (fun1, fun_tau) <- tcInferFun fun - ; (co_fun, expected_arg_tys, actual_res_ty) - <- matchExpectedFunTys (mk_app_msg fun) (length args) fun_tau - ; args1 <- tcArgs fun args expected_arg_tys - ; let fun2 = mkLHsWrapCo co_fun fun1 + do { (fun1, fun_tau) <- tcInferFun fun + ; (co_fun, expected_arg_tys, actual_res_ty) + <- matchExpectedFunTys (mk_app_msg fun) (length args) fun_tau + ; args1 <- tcArgs fun args expected_arg_tys + ; let fun2 = mkLHsWrapCo co_fun fun1 app = foldl mkHsApp fun2 args1 ; return (unLoc app, actual_res_ty) } ---------------- tcInferFun :: LHsExpr Name -> TcM (LHsExpr TcId, TcRhoType) -- Infer and instantiate the type of a function -tcInferFun (L loc (HsVar name)) +tcInferFun (L loc (HsVar name)) = do { (fun, ty) <- setSrcSpan loc (tcInferId name) - -- Don't wrap a context around a plain Id + -- Don't wrap a context around a plain Id ; return (L loc fun, ty) } tcInferFun fun = do { (fun, fun_ty) <- tcInfer (tcMonoExpr fun) -- Zonk the function type carefully, to expose any polymorphism - -- E.g. (( \(x::forall a. a->a). blah ) e) - -- We can see the rank-2 type of the lambda in time to genrealise e + -- E.g. (( \(x::forall a. a->a). 
blah ) e) + -- We can see the rank-2 type of the lambda in time to genrealise e ; fun_ty' <- zonkTcType fun_ty ; (wrap, rho) <- deeplyInstantiate AppOrigin fun_ty' ; return (mkLHsWrap wrap fun, rho) } ---------------- -tcArgs :: LHsExpr Name -- The function (for error messages) - -> [LHsExpr Name] -> [TcSigmaType] -- Actual arguments and expected arg types - -> TcM [LHsExpr TcId] -- Resulting args +tcArgs :: LHsExpr Name -- The function (for error messages) + -> [LHsExpr Name] -> [TcSigmaType] -- Actual arguments and expected arg types + -> TcM [LHsExpr TcId] -- Resulting args tcArgs fun args expected_arg_tys = mapM (tcArg fun) (zip3 args expected_arg_tys [1..]) ---------------- -tcArg :: LHsExpr Name -- The function (for error messages) - -> (LHsExpr Name, TcSigmaType, Int) -- Actual argument and expected arg type - -> TcM (LHsExpr TcId) -- Resulting argument +tcArg :: LHsExpr Name -- The function (for error messages) + -> (LHsExpr Name, TcSigmaType, Int) -- Actual argument and expected arg type + -> TcM (LHsExpr TcId) -- Resulting argument tcArg fun (arg, ty, arg_no) = addErrCtxt (funAppCtxt fun arg arg_no) - (tcPolyExprNC arg ty) + (tcPolyExprNC arg ty) ---------------- tcTupArgs :: [HsTupArg Name] -> [TcSigmaType] -> TcM [HsTupArg TcId] -tcTupArgs args tys +tcTupArgs args tys = ASSERT( equalLength args tys ) mapM go (args `zip` tys) where go (Missing {}, arg_ty) = return (Missing arg_ty) go (Present expr, arg_ty) = do { expr' <- tcPolyExpr expr arg_ty - ; return (Present expr') } + ; return (Present expr') } ---------------- unifyOpFunTysWrap :: LHsExpr Name -> Arity -> TcRhoType - -> TcM (TcCoercion, [TcSigmaType], TcRhoType) + -> TcM (TcCoercion, [TcSigmaType], TcRhoType) -- A wrapper for matchExpectedFunTys unifyOpFunTysWrap op arity ty = matchExpectedFunTys herald arity ty where @@ -1006,7 +999,7 @@ tcSyntaxOp :: CtOrigin -> HsExpr Name -> TcType -> TcM (HsExpr TcId) -- This version assumes res_ty is a monotype tcSyntaxOp orig (HsVar op) res_ty = do { (expr, rho) <- tcInferIdWithOrig orig op ; tcWrapResult expr rho res_ty } -tcSyntaxOp _ other _ = pprPanic "tcSyntaxOp" (ppr other) +tcSyntaxOp _ other _ = pprPanic "tcSyntaxOp" (ppr other) \end{code} @@ -1015,8 +1008,8 @@ Note [Push result type in] Unify with expected result before type-checking the args so that the info from res_ty percolates to args. This is when we might detect a too-few args situation. (One can think of cases when the opposite -order would give a better error message.) -experimenting with putting this first. +order would give a better error message.) +experimenting with putting this first. Here's an example where it actually makes a real difference @@ -1036,14 +1029,14 @@ in the other order, the extra signature in f2 is reqd. 
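The f1/f2 example referred to just above is elided by this hunk. As a separate, minimal illustration of the too-few-args situation mentioned in Note [Push result type in] (the names below are purely illustrative, and the second binding is intentionally ill-typed):

    plus :: Int -> Int -> Int
    plus = (+)

    tooFew :: Int
    tooFew = plus 1
    -- Rejected: the actual result type of 'plus 1' is (Int -> Int),
    -- which does not unify with the expected type Int, so the error
    -- can point at the application and suggest a missing argument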
%************************************************************************ -%* * +%* * tcInferId -%* * +%* * %************************************************************************ \begin{code} tcCheckId :: Name -> TcRhoType -> TcM (HsExpr TcId) -tcCheckId name res_ty +tcCheckId name res_ty = do { (expr, actual_res_ty) <- tcInferId name ; addErrCtxtM (funResCtxt False (HsVar name) actual_res_ty res_ty) $ tcWrapResult expr actual_res_ty res_ty } @@ -1064,29 +1057,29 @@ tcInferIdWithOrig orig id_name ; return (mkHsWrap wrap id_expr, rho) } where lookup_id :: TcM TcId - lookup_id + lookup_id = do { thing <- tcLookup id_name - ; case thing of - ATcId { tct_id = id, tct_level = lvl } - -> do { check_naughty id -- Note [Local record selectors] + ; case thing of + ATcId { tct_id = id, tct_level = lvl } + -> do { check_naughty id -- Note [Local record selectors] ; checkThLocalId id lvl ; return id } - AGlobal (AnId id) + AGlobal (AnId id) -> do { check_naughty id; return id } - -- A global cannot possibly be ill-staged - -- nor does it need the 'lifting' treatment + -- A global cannot possibly be ill-staged + -- nor does it need the 'lifting' treatment -- hence no checkTh stuff here - AGlobal (ADataCon con) -> return (dataConWrapId con) + AGlobal (ADataCon con) -> return (dataConWrapId con) - other -> failWithTc (bad_lookup other) } + other -> failWithTc (bad_lookup other) } bad_lookup thing = ppr thing <+> ptext (sLit "used where a value identifer was expected") - check_naughty id + check_naughty id | isNaughtyRecordSelector id = failWithTc (naughtyRecordSel id) - | otherwise = return () + | otherwise = return () ------------------------ instantiateOuter :: CtOrigin -> TcId -> TcM (HsExpr TcId, TcSigmaType) @@ -1115,20 +1108,20 @@ Note [Multiple instantiation] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ We are careful never to make a MethodInst that has, as its meth_id, another MethodInst. For example, consider - f :: forall a. Eq a => forall b. Ord b => a -> b -At a call to f, at say [Int, Bool], it's tempting to translate the call to + f :: forall a. Eq a => forall b. Ord b => a -> b +At a call to f, at say [Int, Bool], it's tempting to translate the call to - f_m1 + f_m1 where - f_m1 :: forall b. Ord b => Int -> b - f_m1 = f Int dEqInt + f_m1 :: forall b. Ord b => Int -> b + f_m1 = f Int dEqInt - f_m2 :: Int -> Bool - f_m2 = f_m1 Bool dOrdBool + f_m2 :: Int -> Bool + f_m2 = f_m1 Bool dOrdBool But notice that f_m2 has f_m1 as its meth_id. Now the danger is that if we do a tcSimplCheck with a Given f_mx :: f Int dEqInt, we may make a binding - f_m1 = f_mx + f_m1 = f_mx But it's entirely possible that f_m2 will continue to float out, because it mentions no type variables. Result, f_m1 isn't in scope. @@ -1146,8 +1139,8 @@ application, not for the iterated ones. A horribly subtle point. \begin{code} doStupidChecks :: TcId - -> [TcType] - -> TcM () + -> [TcType] + -> TcM () -- Check two tiresome and ad-hoc cases -- (a) the "stupid theta" for a data con; add the constraints -- from the "stupid theta" of a data constructor (sigh) @@ -1158,7 +1151,7 @@ doStupidChecks fun_id tys | fun_id `hasKey` tagToEnumKey -- (b) = failWithTc (ptext (sLit "tagToEnum# must appear applied to one argument")) - + | otherwise = return () -- The common case \end{code} @@ -1170,33 +1163,33 @@ enumeration TyCon. Unification may refine the type later, but this check won't see that, alas. 
It's crude, because it relies on our knowing *now* that the type is ok, which in turn relies on the eager-unification part of the type checker pushing enough information -here. In theory the Right Thing to do is to have a new form of +here. In theory the Right Thing to do is to have a new form of constraint but I definitely cannot face that! And it works ok as-is. Here's are two cases that should fail - f :: forall a. a - f = tagToEnum# 0 -- Can't do tagToEnum# at a type variable + f :: forall a. a + f = tagToEnum# 0 -- Can't do tagToEnum# at a type variable - g :: Int - g = tagToEnum# 0 -- Int is not an enumeration + g :: Int + g = tagToEnum# 0 -- Int is not an enumeration When data type families are involved it's a bit more complicated. data family F a data instance F [Int] = A | B | C Then we want to generate something like tagToEnum# R:FListInt 3# |> co :: R:FListInt ~ F [Int] -Usually that coercion is hidden inside the wrappers for +Usually that coercion is hidden inside the wrappers for constructors of F [Int] but here we have to do it explicitly. It's all grotesquely complicated. \begin{code} -tcSeq :: SrcSpan -> Name -> LHsExpr Name -> LHsExpr Name +tcSeq :: SrcSpan -> Name -> LHsExpr Name -> LHsExpr Name -> TcRhoType -> TcM (HsExpr TcId) -- (seq e1 e2) :: res_ty -- We need a special typing rule because res_ty can be unboxed tcSeq loc fun_name arg1 arg2 res_ty - = do { fun <- tcLookupId fun_name + = do { fun <- tcLookupId fun_name ; (arg1', arg1_ty) <- tcInfer (tcMonoExpr arg1) ; arg2' <- tcMonoExpr arg2 res_ty ; let fun' = L loc (HsWrap ty_args (HsVar fun)) @@ -1207,43 +1200,43 @@ tcTagToEnum :: SrcSpan -> Name -> LHsExpr Name -> TcRhoType -> TcM (HsExpr TcId) -- tagToEnum# :: forall a. Int# -> a -- See Note [tagToEnum#] Urgh! tcTagToEnum loc fun_name arg res_ty - = do { fun <- tcLookupId fun_name + = do { fun <- tcLookupId fun_name ; ty' <- zonkTcType res_ty - -- Check that the type is algebraic + -- Check that the type is algebraic ; let mb_tc_app = tcSplitTyConApp_maybe ty' Just (tc, tc_args) = mb_tc_app - ; checkTc (isJust mb_tc_app) + ; checkTc (isJust mb_tc_app) (tagToEnumError ty' doc1) - -- Look through any type family + -- Look through any type family ; (coi, rep_tc, rep_args) <- get_rep_ty ty' tc tc_args - ; checkTc (isEnumerationTyCon rep_tc) + ; checkTc (isEnumerationTyCon rep_tc) (tagToEnumError ty' doc2) ; arg' <- tcMonoExpr arg intPrimTy ; let fun' = L loc (HsWrap (WpTyApp rep_ty) (HsVar fun)) rep_ty = mkTyConApp rep_tc rep_args - ; return (mkHsWrapCo coi $ HsApp fun' arg') } + ; return (mkHsWrapCo coi $ HsApp fun' arg') } where doc1 = vcat [ ptext (sLit "Specify the type by giving a type signature") - , ptext (sLit "e.g. (tagToEnum# x) :: Bool") ] + , ptext (sLit "e.g. 
(tagToEnum# x) :: Bool") ] doc2 = ptext (sLit "Result type must be an enumeration type") doc3 = ptext (sLit "No family instance for this type") get_rep_ty :: TcType -> TyCon -> [TcType] -> TcM (TcCoercion, TyCon, [TcType]) - -- Converts a family type (eg F [a]) to its rep type (eg FList a) - -- and returns a coercion between the two + -- Converts a family type (eg F [a]) to its rep type (eg FList a) + -- and returns a coercion between the two get_rep_ty ty tc tc_args - | not (isFamilyTyCon tc) + | not (isFamilyTyCon tc) = return (mkTcReflCo ty, tc, tc_args) - | otherwise + | otherwise = do { mb_fam <- tcLookupFamInst tc tc_args - ; case mb_fam of - Nothing -> failWithTc (tagToEnumError ty doc3) + ; case mb_fam of + Nothing -> failWithTc (tagToEnumError ty doc3) Just (FamInstMatch { fim_instance = rep_fam , fim_tys = rep_args }) -> return ( mkTcSymCo (mkTcUnbranchedAxInstCo co_tc rep_args) @@ -1254,16 +1247,16 @@ tcTagToEnum loc fun_name arg res_ty tagToEnumError :: TcType -> SDoc -> SDoc tagToEnumError ty what - = hang (ptext (sLit "Bad call to tagToEnum#") - <+> ptext (sLit "at type") <+> ppr ty) - 2 what + = hang (ptext (sLit "Bad call to tagToEnum#") + <+> ptext (sLit "at type") <+> ppr ty) + 2 what \end{code} %************************************************************************ -%* * +%* * Template Haskell checks -%* * +%* * %************************************************************************ \begin{code} @@ -1274,73 +1267,73 @@ checkThLocalId :: Id -> ThLevel -> TcM () checkThLocalId _id _bind_lvl = return () -#else /* GHCI and TH is on */ -checkThLocalId id bind_lvl - = do { use_stage <- getStage -- TH case - ; let use_lvl = thLevel use_stage - ; checkWellStaged (quotes (ppr id)) bind_lvl use_lvl - ; traceTc "thLocalId" (ppr id <+> ppr bind_lvl <+> ppr use_stage <+> ppr use_lvl) - ; when (use_lvl > bind_lvl) $ +#else /* GHCI and TH is on */ +checkThLocalId id bind_lvl + = do { use_stage <- getStage -- TH case + ; let use_lvl = thLevel use_stage + ; checkWellStaged (quotes (ppr id)) bind_lvl use_lvl + ; traceTc "thLocalId" (ppr id <+> ppr bind_lvl <+> ppr use_stage <+> ppr use_lvl) + ; when (use_lvl > bind_lvl) $ checkCrossStageLifting id bind_lvl use_stage } -------------------------------------- checkCrossStageLifting :: Id -> ThLevel -> ThStage -> TcM () -- We are inside brackets, and (use_lvl > bind_lvl) -- Now we must check whether there's a cross-stage lift to do --- Examples \x -> [| x |] +-- Examples \x -> [| x |] -- [| map |] checkCrossStageLifting _ _ Comp = return () checkCrossStageLifting _ _ Splice = return () -checkCrossStageLifting id _ (Brack _ ps_var lie_var) +checkCrossStageLifting id _ (Brack _ ps_var lie_var) | thTopLevelId id - = -- Top-level identifiers in this module, - -- (which have External Names) - -- are just like the imported case: - -- no need for the 'lifting' treatment - -- E.g. this is fine: - -- f x = x - -- g y = [| f 3 |] - -- But we do need to put f into the keep-alive - -- set, because after desugaring the code will - -- only mention f's *name*, not f itself. + = -- Top-level identifiers in this module, + -- (which have External Names) + -- are just like the imported case: + -- no need for the 'lifting' treatment + -- E.g. this is fine: + -- f x = x + -- g y = [| f 3 |] + -- But we do need to put f into the keep-alive + -- set, because after desugaring the code will + -- only mention f's *name*, not f itself. 
keepAliveTc id - | otherwise -- bind_lvl = outerLevel presumably, - -- but the Id is not bound at top level - = -- Nested identifiers, such as 'x' in - -- E.g. \x -> [| h x |] - -- We must behave as if the reference to x was - -- h $(lift x) - -- We use 'x' itself as the splice proxy, used by - -- the desugarer to stitch it all back together. - -- If 'x' occurs many times we may get many identical - -- bindings of the same splice proxy, but that doesn't - -- matter, although it's a mite untidy. - do { let id_ty = idType id + | otherwise -- bind_lvl = outerLevel presumably, + -- but the Id is not bound at top level + = -- Nested identifiers, such as 'x' in + -- E.g. \x -> [| h x |] + -- We must behave as if the reference to x was + -- h $(lift x) + -- We use 'x' itself as the splice proxy, used by + -- the desugarer to stitch it all back together. + -- If 'x' occurs many times we may get many identical + -- bindings of the same splice proxy, but that doesn't + -- matter, although it's a mite untidy. + do { let id_ty = idType id ; checkTc (isTauTy id_ty) (polySpliceErr id) - -- If x is polymorphic, its occurrence sites might - -- have different instantiations, so we can't use plain - -- 'x' as the splice proxy name. I don't know how to - -- solve this, and it's probably unimportant, so I'm - -- just going to flag an error for now - - ; lift <- if isStringTy id_ty then - do { sid <- tcLookupId DsMeta.liftStringName - -- See Note [Lifting strings] + -- If x is polymorphic, its occurrence sites might + -- have different instantiations, so we can't use plain + -- 'x' as the splice proxy name. I don't know how to + -- solve this, and it's probably unimportant, so I'm + -- just going to flag an error for now + + ; lift <- if isStringTy id_ty then + do { sid <- tcLookupId DsMeta.liftStringName + -- See Note [Lifting strings] ; return (HsVar sid) } - else - setConstraintVar lie_var $ do - -- Put the 'lift' constraint into the right LIE - newMethodFromName (OccurrenceOf (idName id)) + else + setConstraintVar lie_var $ do + -- Put the 'lift' constraint into the right LIE + newMethodFromName (OccurrenceOf (idName id)) DsMeta.liftName id_ty - - -- Update the pending splices - ; ps <- readMutVar ps_var - ; writeMutVar ps_var ((idName id, nlHsApp (noLoc lift) (nlHsVar id)) : ps) - ; return () } + -- Update the pending splices + ; ps <- readMutVar ps_var + ; writeMutVar ps_var ((idName id, nlHsApp (noLoc lift) (nlHsVar id)) : ps) + + ; return () } #endif /* GHCI */ \end{code} @@ -1351,10 +1344,10 @@ generate a mass of Cons (CharL 'x') (Cons (CharL 'y') ...)) etc. So this conditional short-circuits the lifting mechanism to generate (liftString "xy") in that case. I didn't want to use overlapping instances for the Lift class in TH.Syntax, because that can lead to overlapping-instance -errors in a polymorphic situation. +errors in a polymorphic situation. If this check fails (which isn't impossible) we get another chance; see -Note [Converting strings] in Convert.lhs +Note [Converting strings] in Convert.lhs Local record selectors ~~~~~~~~~~~~~~~~~~~~~~ @@ -1364,9 +1357,9 @@ naughtiness in both branches. c.f. TcTyClsBindings.mkAuxBinds. %************************************************************************ -%* * +%* * \subsection{Record bindings} -%* * +%* * %************************************************************************ Game plan for record bindings @@ -1380,84 +1373,84 @@ For each binding field = value 3. Instantiate the field type (from the field label) using the type envt from step 2. 
-4 Type check the value using tcArg, passing the field type as +4 Type check the value using tcArg, passing the field type as the expected argument type. This extends OK when the field types are universally quantified. - + \begin{code} tcRecordBinds - :: DataCon - -> [TcType] -- Expected type for each field - -> HsRecordBinds Name - -> TcM (HsRecordBinds TcId) + :: DataCon + -> [TcType] -- Expected type for each field + -> HsRecordBinds Name + -> TcM (HsRecordBinds TcId) tcRecordBinds data_con arg_tys (HsRecFields rbinds dd) - = do { mb_binds <- mapM do_bind rbinds - ; return (HsRecFields (catMaybes mb_binds) dd) } + = do { mb_binds <- mapM do_bind rbinds + ; return (HsRecFields (catMaybes mb_binds) dd) } where flds_w_tys = zipEqual "tcRecordBinds" (dataConFieldLabels data_con) arg_tys do_bind fld@(HsRecField { hsRecFieldId = L loc field_lbl, hsRecFieldArg = rhs }) | Just field_ty <- assocMaybe flds_w_tys field_lbl - = addErrCtxt (fieldCtxt field_lbl) $ - do { rhs' <- tcPolyExprNC rhs field_ty - ; let field_id = mkUserLocal (nameOccName field_lbl) - (nameUnique field_lbl) - field_ty loc - -- Yuk: the field_id has the *unique* of the selector Id - -- (so we can find it easily) - -- but is a LocalId with the appropriate type of the RHS - -- (so the desugarer knows the type of local binder to make) - ; return (Just (fld { hsRecFieldId = L loc field_id, hsRecFieldArg = rhs' })) } + = addErrCtxt (fieldCtxt field_lbl) $ + do { rhs' <- tcPolyExprNC rhs field_ty + ; let field_id = mkUserLocal (nameOccName field_lbl) + (nameUnique field_lbl) + field_ty loc + -- Yuk: the field_id has the *unique* of the selector Id + -- (so we can find it easily) + -- but is a LocalId with the appropriate type of the RHS + -- (so the desugarer knows the type of local binder to make) + ; return (Just (fld { hsRecFieldId = L loc field_id, hsRecFieldArg = rhs' })) } | otherwise = do { addErrTc (badFieldCon data_con field_lbl) - ; return Nothing } + ; return Nothing } checkMissingFields :: DataCon -> HsRecordBinds Name -> TcM () checkMissingFields data_con rbinds - | null field_labels -- Not declared as a record; - -- But C{} is still valid if no strict fields + | null field_labels -- Not declared as a record; + -- But C{} is still valid if no strict fields = if any isBanged field_strs then - -- Illegal if any arg is strict - addErrTc (missingStrictFields data_con []) + -- Illegal if any arg is strict + addErrTc (missingStrictFields data_con []) else - return () - - | otherwise = do -- A record + return () + + | otherwise = do -- A record unless (null missing_s_fields) - (addErrTc (missingStrictFields data_con missing_s_fields)) + (addErrTc (missingStrictFields data_con missing_s_fields)) warn <- woptM Opt_WarnMissingFields unless (not (warn && notNull missing_ns_fields)) - (warnTc True (missingFields data_con missing_ns_fields)) + (warnTc True (missingFields data_con missing_ns_fields)) where missing_s_fields - = [ fl | (fl, str) <- field_info, - isBanged str, - not (fl `elem` field_names_used) - ] + = [ fl | (fl, str) <- field_info, + isBanged str, + not (fl `elem` field_names_used) + ] missing_ns_fields - = [ fl | (fl, str) <- field_info, - not (isBanged str), - not (fl `elem` field_names_used) - ] + = [ fl | (fl, str) <- field_info, + not (isBanged str), + not (fl `elem` field_names_used) + ] field_names_used = hsRecFields rbinds field_labels = dataConFieldLabels data_con field_info = zipEqual "missingFields" - field_labels - field_strs + field_labels + field_strs field_strs = dataConStrictMarks data_con \end{code} 
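A small sketch of the game plan above in action; the Pair type and the bindings are illustrative only and not part of this module:

    data Pair a = MkPair { first :: a, second :: a }

    -- The expected result type (Pair Int) fixes the instantiation of
    -- 'a', so each field's right-hand side is checked against the
    -- instantiated field type Int, as in steps 3 and 4 above
    p :: Pair Int
    p = MkPair { first = 1, second = 2 }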
%************************************************************************ -%* * +%* * \subsection{Errors and contexts} -%* * +%* * %************************************************************************ Boring and alphabetical: @@ -1475,17 +1468,17 @@ fieldCtxt field_name funAppCtxt :: LHsExpr Name -> LHsExpr Name -> Int -> SDoc funAppCtxt fun arg arg_no - = hang (hsep [ ptext (sLit "In the"), speakNth arg_no, ptext (sLit "argument of"), - quotes (ppr fun) <> text ", namely"]) + = hang (hsep [ ptext (sLit "In the"), speakNth arg_no, ptext (sLit "argument of"), + quotes (ppr fun) <> text ", namely"]) 2 (quotes (ppr arg)) funResCtxt :: Bool -- There is at least one argument - -> HsExpr Name -> TcType -> TcType + -> HsExpr Name -> TcType -> TcType -> TidyEnv -> TcM (TidyEnv, MsgDoc) -- When we have a mis-match in the return type of a function -- try to give a helpful message about too many/few arguments -- --- Used for naked variables too; but with has_args = False +-- Used for naked variables too; but with has_args = False funResCtxt has_args fun fun_res_ty env_ty tidy_env = do { fun_res' <- zonkTcType fun_res_ty ; env' <- zonkTcType env_ty @@ -1504,7 +1497,7 @@ funResCtxt has_args fun fun_res_ty env_ty tidy_env -- applied to too many args! ; return (tidy_env, info) } where - not_fun ty -- ty is definitely not an arrow type, + not_fun ty -- ty is definitely not an arrow type, -- and cannot conceivably become one = case tcSplitTyConApp_maybe ty of Just (tc, _) -> isAlgTyCon tc @@ -1513,7 +1506,7 @@ funResCtxt has_args fun fun_res_ty env_ty tidy_env badFieldTypes :: [(Name,TcType)] -> SDoc badFieldTypes prs = hang (ptext (sLit "Record update for insufficiently polymorphic field") - <> plural prs <> colon) + <> plural prs <> colon) 2 (vcat [ ppr f <+> dcolon <+> ppr ty | (f,ty) <- prs ]) badFieldsUpd :: HsRecFields Name a -> SDoc @@ -1523,8 +1516,8 @@ badFieldsUpd rbinds naughtyRecordSel :: TcId -> SDoc naughtyRecordSel sel_id - = ptext (sLit "Cannot use record selector") <+> quotes (ppr sel_id) <+> - ptext (sLit "as a function due to escaped type variables") $$ + = ptext (sLit "Cannot use record selector") <+> quotes (ppr sel_id) <+> + ptext (sLit "as a function due to escaped type variables") $$ ptext (sLit "Probable fix: use pattern-matching syntax instead") notSelector :: Name -> SDoc @@ -1535,17 +1528,17 @@ missingStrictFields :: DataCon -> [FieldLabel] -> SDoc missingStrictFields con fields = header <> rest where - rest | null fields = empty -- Happens for non-record constructors - -- with strict fields - | otherwise = colon <+> pprWithCommas ppr fields + rest | null fields = empty -- Happens for non-record constructors + -- with strict fields + | otherwise = colon <+> pprWithCommas ppr fields + + header = ptext (sLit "Constructor") <+> quotes (ppr con) <+> + ptext (sLit "does not have the required strict field(s)") - header = ptext (sLit "Constructor") <+> quotes (ppr con) <+> - ptext (sLit "does not have the required strict field(s)") - missingFields :: DataCon -> [FieldLabel] -> SDoc missingFields con fields - = ptext (sLit "Fields of") <+> quotes (ppr con) <+> ptext (sLit "not initialised:") - <+> pprWithCommas ppr fields + = ptext (sLit "Fields of") <+> quotes (ppr con) <+> ptext (sLit "not initialised:") + <+> pprWithCommas ppr fields -- callCtxt fun args = ptext (sLit "In the call") <+> parens (ppr (foldl mkHsApp fun args)) diff --git a/compiler/typecheck/TcForeign.lhs b/compiler/typecheck/TcForeign.lhs index 630157ee79a4..f65681ed1ef3 100644 --- 
a/compiler/typecheck/TcForeign.lhs +++ b/compiler/typecheck/TcForeign.lhs @@ -74,9 +74,9 @@ normaliseFfiType ty normaliseFfiType' fam_envs ty normaliseFfiType' :: FamInstEnvs -> Type -> TcM (Coercion, Type, Bag GlobalRdrElt) -normaliseFfiType' env ty0 = go [] ty0 +normaliseFfiType' env ty0 = go initRecTc ty0 where - go :: [TyCon] -> Type -> TcM (Coercion, Type, Bag GlobalRdrElt) + go :: RecTcChecker -> Type -> TcM (Coercion, Type, Bag GlobalRdrElt) go rec_nts ty | Just ty' <- coreView ty -- Expand synonyms = go rec_nts ty' @@ -87,8 +87,15 @@ normaliseFfiType' env ty0 = go [] ty0 = children_only | isNewTyCon tc -- Expand newtypes + , Just rec_nts' <- checkRecTc rec_nts tc + -- See Note [Expanding newtypes] in TyCon.lhs + -- We can't just use isRecursiveTyCon; sometimes recursion is ok: + -- newtype T = T (Ptr T) + -- Here, we don't reject the type for being recursive. + -- If this is a recursive newtype then it will normally + -- be rejected later as not being a valid FFI type. = do { rdr_env <- getGlobalRdrEnv - ; case checkNewtypeFFI rdr_env rec_nts tc of + ; case checkNewtypeFFI rdr_env tc of Nothing -> children_only Just gre -> do { (co', ty', gres) <- go rec_nts' nt_rhs ; return (mkTransCo nt_co co', ty', gre `consBag` gres) } } @@ -110,9 +117,6 @@ normaliseFfiType' env ty0 = go [] ty0 nt_co = mkUnbranchedAxInstCo (newTyConCo tc) tys nt_rhs = newTyConInstRhs tc tys - rec_nts' | isRecursiveTyCon tc = tc:rec_nts - | otherwise = rec_nts - go rec_nts (AppTy ty1 ty2) = do (coi1, nty1, gres1) <- go rec_nts ty1 (coi2, nty2, gres2) <- go rec_nts ty2 @@ -131,16 +135,9 @@ normaliseFfiType' env ty0 = go [] ty0 go _ ty@(LitTy {}) = return (Refl ty, ty, emptyBag) -checkNewtypeFFI :: GlobalRdrEnv -> [TyCon] -> TyCon -> Maybe GlobalRdrElt -checkNewtypeFFI rdr_env rec_nts tc - | not (tc `elem` rec_nts) - -- See Note [Expanding newtypes] in Type.lhs - -- We can't just use isRecursiveTyCon; sometimes recursion is ok: - -- newtype T = T (Ptr T) - -- Here, we don't reject the type for being recursive. - -- If this is a recursive newtype then it will normally - -- be rejected later as not being a valid FFI type. - , Just con <- tyConSingleDataCon_maybe tc +checkNewtypeFFI :: GlobalRdrEnv -> TyCon -> Maybe GlobalRdrElt +checkNewtypeFFI rdr_env tc + | Just con <- tyConSingleDataCon_maybe tc , [gre] <- lookupGRE_Name rdr_env (dataConName con) = Just gre -- See Note [Newtype constructor usage in foreign declarations] | otherwise diff --git a/compiler/typecheck/TcGenDeriv.lhs b/compiler/typecheck/TcGenDeriv.lhs index b31d7418fa41..77bda8274bd3 100644 --- a/compiler/typecheck/TcGenDeriv.lhs +++ b/compiler/typecheck/TcGenDeriv.lhs @@ -880,7 +880,25 @@ The latter desugares to inline code for matching the Ident and the string, and this can be very voluminous. The former is much more compact. Cf Trac #7258, although that also concerned non-linearity in the occurrence analyser, a separate issue. - + +Note [Read for empty data types] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +What should we get for this? (Trac #7931) + data Emp deriving( Read ) -- No data constructors + +Here we want + read "[]" :: [Emp] to succeed, returning [] +So we do NOT want + instance Read Emp where + readPrec = error "urk" +Rather we want + instance Read Emp where + readPred = pfail -- Same as choose [] + +Because 'pfail' allows the parser to backtrack, but 'error' doesn't. +These instances are also useful for Read (Either Int Emp), where +we want to be able to parse (Left 3) just fine. 
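+
+As a purely illustrative sketch of the behaviour we are after (using the
+hypothetical Emp from above; none of this is compiled here):
+
+    data Emp deriving( Read )                           -- No data constructors
+
+    length (read "[]" :: [Emp])                         -- evaluates to 0
+    (\(Left n) -> n) (read "Left 3" :: Either Int Emp)  -- evaluates to 3
+    read "MkEmp" :: Emp                                 -- parse error: pfail
+                                                        -- backtracks, and no
+                                                        -- constructor matches
+
+With the 'error' version of the instance, even the first two calls would
+blow up, which is precisely what we are trying to avoid above.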
+ \begin{code} gen_Read_binds :: FixityEnv -> SrcSpan -> TyCon -> (LHsBinds RdrName, BagDerivStuff) @@ -901,7 +919,7 @@ gen_Read_binds get_fixity loc tycon read_prec = mkHsVarBind loc readPrec_RDR (nlHsApp (nlHsVar parens_RDR) read_cons) - read_cons | null data_cons = error_Expr "Derived Read on empty data type" -- Trac #7931 + read_cons | null data_cons = nlHsVar pfail_RDR -- See Note [Read for empty data types] | otherwise = foldr1 mk_alt (read_nullary_cons ++ read_non_nullary_cons) read_non_nullary_cons = map read_non_nullary_con non_nullary_cons diff --git a/compiler/typecheck/TcGenGenerics.lhs b/compiler/typecheck/TcGenGenerics.lhs index a03923651cba..f4765e942546 100644 --- a/compiler/typecheck/TcGenGenerics.lhs +++ b/compiler/typecheck/TcGenGenerics.lhs @@ -160,11 +160,11 @@ metaTyConsToDerivStuff tc metaDts = (myZip2 s_insts s_binds) myZip1 :: [a] -> [b] -> [(a,b)] - myZip1 l1 l2 = ASSERT (length l1 == length l2) zip l1 l2 + myZip1 l1 l2 = ASSERT(length l1 == length l2) zip l1 l2 myZip2 :: [[a]] -> [[b]] -> [[(a,b)]] myZip2 l1 l2 = - ASSERT (and (zipWith (>=) (map length l1) (map length l2))) + ASSERT(and (zipWith (>=) (map length l1) (map length l2))) [ zip x1 x2 | (x1,x2) <- zip l1 l2 ] return $ mapBag DerivTyCon (metaTyCons2TyCons metaDts) @@ -388,7 +388,7 @@ mkBindsRep gk tycon = (from_alts, to_alts) = mkSum gk_ (1 :: US) tycon datacons where gk_ = case gk of Gen0 -> Gen0_ - Gen1 -> ASSERT (length tyvars >= 1) + Gen1 -> ASSERT(length tyvars >= 1) Gen1_ (last tyvars) where tyvars = tyConTyVars tycon @@ -415,7 +415,7 @@ tc_mkRepFamInsts gk tycon metaDts mod = ; let -- `tyvars` = [a,b] (tyvars, gk_) = case gk of Gen0 -> (all_tyvars, Gen0_) - Gen1 -> ASSERT (not $ null all_tyvars) + Gen1 -> ASSERT(not $ null all_tyvars) (init all_tyvars, Gen1_ $ last all_tyvars) where all_tyvars = tyConTyVars tycon @@ -553,16 +553,16 @@ tc_mkRepTy gk_ tycon metaDts = -- Sums and products are done in the same way for both Rep and Rep1 sumP [] = mkTyConTy v1 - sumP l = ASSERT (length metaCTyCons == length l) + sumP l = ASSERT(length metaCTyCons == length l) foldBal mkSum' [ mkC i d a | (d,(a,i)) <- zip metaCTyCons (zip l [0..])] -- The Bool is True if this constructor has labelled fields prod :: Int -> [Type] -> Bool -> Type - prod i [] _ = ASSERT (length metaSTyCons > i) - ASSERT (length (metaSTyCons !! i) == 0) + prod i [] _ = ASSERT(length metaSTyCons > i) + ASSERT(length (metaSTyCons !! i) == 0) mkTyConTy u1 - prod i l b = ASSERT (length metaSTyCons > i) - ASSERT (length l == length (metaSTyCons !! i)) + prod i l b = ASSERT(length metaSTyCons > i) + ASSERT(length l == length (metaSTyCons !! i)) foldBal mkProd [ arg d t b | (d,t) <- zip (metaSTyCons !! 
i) l ] diff --git a/compiler/typecheck/TcHsSyn.lhs b/compiler/typecheck/TcHsSyn.lhs index 04e5582df152..8e10303869d8 100644 --- a/compiler/typecheck/TcHsSyn.lhs +++ b/compiler/typecheck/TcHsSyn.lhs @@ -1324,7 +1324,9 @@ zonkTcTypeToType env ty = go ty where go (TyConApp tc tys) = do tys' <- mapM go tys - return (TyConApp tc tys') + return (mkTyConApp tc tys') + -- Establish Type invariants + -- See Note [Zonking inside the knot] in TcHsType go (LitTy n) = return (LitTy n) diff --git a/compiler/typecheck/TcHsType.lhs b/compiler/typecheck/TcHsType.lhs index 100d92cff5b4..c7d65a6b57fe 100644 --- a/compiler/typecheck/TcHsType.lhs +++ b/compiler/typecheck/TcHsType.lhs @@ -45,7 +45,6 @@ import {-# SOURCE #-} TcSplice( tcSpliceType ) #endif import HsSyn -import TcHsSyn ( zonkTcTypeToType, emptyZonkEnv ) import TcRnMonad import TcEvidence( HsWrapper ) import TcEnv @@ -55,8 +54,8 @@ import TcUnify import TcIface import TcType import Type +import TypeRep( Type(..) ) -- For the mkNakedXXX stuff import Kind -import TypeRep( mkNakedTyConApp ) import Var import VarSet import TyCon @@ -185,7 +184,7 @@ tcHsSigTypeNC ctxt (L loc hs_ty) ; ty <- tcCheckHsTypeAndGen hs_ty kind -- Zonk to expose kind information to checkValidType - ; ty <- zonkTcType ty + ; ty <- zonkSigType ty ; checkValidType ctxt ty ; return ty } @@ -197,7 +196,7 @@ tcHsInstHead user_ctxt lhs_ty@(L loc hs_ty) do { inst_ty <- tc_inst_head hs_ty ; kvs <- zonkTcTypeAndFV inst_ty ; kvs <- kindGeneralize kvs - ; inst_ty <- zonkTcType (mkForAllTys kvs inst_ty) + ; inst_ty <- zonkSigType (mkForAllTys kvs inst_ty) ; checkValidInstance user_ctxt lhs_ty inst_ty } tc_inst_head :: HsType Name -> TcM TcType @@ -219,7 +218,7 @@ tcHsDeriv hs_ty -- Funny newtype deriving form -- forall a. C [a] -- where C has arity 2. Hence any-kinded result - ; ty <- zonkTcType ty + ; ty <- zonkSigType ty ; let (tvs, pred) = splitForAllTys ty ; case getClassPredTys_maybe pred of Just (cls, tys) -> return (tvs, cls, tys) @@ -255,7 +254,7 @@ tcClassSigType :: LHsType Name -> TcM Type tcClassSigType lhs_ty@(L _ hs_ty) = addTypeCtxt lhs_ty $ do { ty <- tcCheckHsTypeAndGen hs_ty liftedTypeKind - ; zonkTcTypeToType emptyZonkEnv ty } + ; zonkSigType ty } tcHsConArgType :: NewOrData -> LHsType Name -> TcM Type -- Permit a bang, but discard it @@ -378,11 +377,11 @@ tc_hs_type hs_ty@(HsOpTy ty1 (_, l_op@(L _ op)) ty2) exp_kind -- mkNakedAppTys: see Note [Zonking inside the knot] tc_hs_type hs_ty@(HsAppTy ty1 ty2) exp_kind - | L _ (HsTyVar fun) <- fun_ty - , fun `hasKey` funTyConKey - , [fty1,fty2] <- arg_tys - = tc_fun_type hs_ty fty1 fty2 exp_kind - | otherwise +-- | L _ (HsTyVar fun) <- fun_ty +-- , fun `hasKey` funTyConKey +-- , [fty1,fty2] <- arg_tys +-- = tc_fun_type hs_ty fty1 fty2 exp_kind +-- | otherwise = do { (fun_ty', fun_kind) <- tc_infer_lhs_type fun_ty ; arg_tys' <- tcCheckApps hs_ty fun_ty fun_kind arg_tys exp_kind ; return (mkNakedAppTys fun_ty' arg_tys') } @@ -704,10 +703,76 @@ can't change it. So we must traverse the type. BUT the parent TyCon is knot-tied, so we can't look at it yet. So we must be careful not to use "smart constructors" for types that -look at the TyCon or Class involved. Hence the use of mkNakedXXX -functions. +look at the TyCon or Class involved. + + * Hence the use of mkNakedXXX functions. These do *not* enforce + the invariants (for example that we use (FunTy s t) rather + than (TyConApp (->) [s,t])). 
+ + * Ditto in zonkTcType (which may be applied more than once, eg to + squeeze out kind meta-variables), we are careful not to look at + the TyCon. + + * We arrange to call zonkSigType *once* right at the end, and it + does establish the invariants. But in exchange we can't look + at the result (not even its structure) until we have emerged + from the "knot". + + * TcHsSyn.zonkTcTypeToType also can safely check/establish + invariants. -This is sadly delicate. +This is horribly delicate. I hate it. A good example of how +delicate it is can be seen in Trac #7903. + +\begin{code} +mkNakedTyConApp :: TyCon -> [Type] -> Type +-- Builds a TyConApp +-- * without being strict in TyCon, +-- * without satisfying the invariants of TyConApp +-- A subsequent zonking will establish the invariants +mkNakedTyConApp tc tys = TyConApp tc tys + +mkNakedAppTys :: Type -> [Type] -> Type +mkNakedAppTys ty1 [] = ty1 +mkNakedAppTys (TyConApp tc tys1) tys2 = mkNakedTyConApp tc (tys1 ++ tys2) +mkNakedAppTys ty1 tys2 = foldl AppTy ty1 tys2 + +zonkSigType :: TcType -> TcM TcType +-- Zonk the result of type-checking a user-written type signature +-- It may have kind varaibles in it, but no meta type variables +-- Because of knot-typing (see Note [Zonking inside the knot]) +-- it may need to establish the Type invariants; +-- hence the use of mkTyConApp and mkAppTy +zonkSigType ty + = go ty + where + go (TyConApp tc tys) = do tys' <- mapM go tys + return (mkTyConApp tc tys') + -- Key point: establish Type invariants! + -- See Note [Zonking inside the knot] + + go (LitTy n) = return (LitTy n) + + go (FunTy arg res) = do arg' <- go arg + res' <- go res + return (FunTy arg' res') + + go (AppTy fun arg) = do fun' <- go fun + arg' <- go arg + return (mkAppTy fun' arg') + -- NB the mkAppTy; we might have instantiated a + -- type variable to a type constructor, so we need + -- to pull the TyConApp to the top. + + -- The two interesting cases! + go (TyVarTy tyvar) | isTcTyVar tyvar = zonkTcTyVar tyvar + | otherwise = TyVarTy <$> updateTyVarKindM go tyvar + -- Ordinary (non Tc) tyvars occur inside quantified types + + go (ForAllTy tv ty) = do { tv' <- zonkTcTyVarBndr tv + ; ty' <- go ty + ; return (ForAllTy tv' ty') } +\end{code} Note [Body kind of a forall] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -1180,7 +1245,7 @@ tcHsPatSigType ctxt (HsWB { hswb_cts = hs_ty, hswb_kvs = sig_kvs, hswb_tvs = sig ; let ktv_binds = (sig_kvs `zip` kvs) ++ (sig_tvs `zip` tvs) ; sig_ty <- tcExtendTyVarEnv2 ktv_binds $ tcHsLiftedType hs_ty - ; sig_ty <- zonkTcType sig_ty + ; sig_ty <- zonkSigType sig_ty ; checkValidType ctxt sig_ty ; return (sig_ty, ktv_binds) } where diff --git a/compiler/typecheck/TcInstDcls.lhs b/compiler/typecheck/TcInstDcls.lhs index 2dd2a002fcd4..37c91ca4d668 100644 --- a/compiler/typecheck/TcInstDcls.lhs +++ b/compiler/typecheck/TcInstDcls.lhs @@ -41,7 +41,6 @@ import TcEnv import TcHsType import TcUnify import MkCore ( nO_METHOD_BINDING_ERROR_ID ) -import CoreSyn ( DFunArg(..) ) import Type import TcEvidence import TyCon @@ -50,10 +49,10 @@ import DataCon import Class import Var import VarEnv -import VarSet ( mkVarSet, subVarSet, varSetElems ) +import VarSet import Pair import CoreUnfold ( mkDFunUnfolding ) -import CoreSyn ( Expr(Var), CoreExpr ) +import CoreSyn ( Expr(Var, Type), CoreExpr, mkTyApps, mkVarApps ) import PrelNames ( tYPEABLE_INTERNAL, typeableClassName, oldTypeableClassNames ) import Bag @@ -189,9 +188,9 @@ Instead we use a cunning trick. a suitable constructor application -- inlining df "on the fly" as it were. 
- * We give the ClassOp 'op2' a BuiltinRule that extracts the right piece - iff its argument satisfies exprIsConApp_maybe. This is done in - MkId mkDictSelId + * ClassOp rules: We give the ClassOp 'op2' a BuiltinRule that + extracts the right piece iff its argument satisfies + exprIsConApp_maybe. This is done in MkId mkDictSelId * We make 'df' CONLIKE, so that shared uses still match; eg let d = df d1 d2 @@ -694,8 +693,9 @@ tcDataFamInstDecl mb_clsinfo NewType -> ASSERT( not (null data_cons) ) mkNewTyConRhs rep_tc_name rec_rep_tc (head data_cons) -- freshen tyvars - ; let axiom = mkSingleCoAxiom axiom_name tvs' fam_tc pats' - (mkTyConApp rep_tc (mkTyVarTys tvs')) + ; let (eta_tvs, eta_pats) = eta_reduce tvs' pats' + axiom = mkSingleCoAxiom axiom_name eta_tvs fam_tc eta_pats + (mkTyConApp rep_tc (mkTyVarTys eta_tvs)) parent = FamInstTyCon axiom fam_tc pats' rep_tc = buildAlgTyCon rep_tc_name tvs' cType stupid_theta tc_rhs Recursive @@ -712,8 +712,44 @@ tcDataFamInstDecl mb_clsinfo -- Remember to check validity; no recursion to worry about here ; checkValidTyCon rep_tc ; return fam_inst } } + where + -- See Note [Eta reduction for data family axioms] + -- [a,b,c,d].T [a] c Int c d ==> [a,b,c]. T [a] c Int c + eta_reduce tvs pats = go (reverse tvs) (reverse pats) + go (tv:tvs) (pat:pats) + | Just tv' <- getTyVar_maybe pat + , tv == tv' + , not (tv `elemVarSet` tyVarsOfTypes pats) + = go tvs pats + go tvs pats = (reverse tvs, reverse pats) \end{code} +Note [Eta reduction for data family axioms] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Consider this + data family T a b :: * + newtype instance T Int a = MkT (IO a) deriving( Monad ) +We'd like this to work. From the 'newtype instance' you might +think we'd get: + newtype TInt a = MkT (IO a) + axiom ax1 a :: T Int a ~ TInt a -- The type-instance part + axiom ax2 a :: TInt a ~ IO a -- The newtype part + +But now what can we do? We have this problem + Given: d :: Monad IO + Wanted: d' :: Monad (T Int) = d |> ???? +What coercion can we use for the ??? + +Solution: eta-reduce both axioms, thus: + axiom ax1 :: T Int ~ TInt + axiom ax2 :: TInt ~ IO +Now + d' = d |> Monad (sym (ax2 ; ax1)) + +See Note [Newtype eta] in TyCon. 
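+
+To make the eta-reduction step itself concrete, here is a tiny standalone
+sketch of its shape (etaReduceSketch is a made-up name, and it uses plain
+strings where the real eta_reduce above works over TyVars and Types; in
+particular the real occurs check uses tyVarsOfTypes, not simple equality):
+
+    etaReduceSketch :: [String] -> [String] -> ([String], [String])
+    etaReduceSketch tvs pats = go (reverse tvs) (reverse pats)
+      where
+        go (tv:tvs') (pat:pats')
+          | tv == pat              -- last pattern is exactly the last tyvar
+          , tv `notElem` pats'     -- and it occurs in no earlier pattern
+          = go tvs' pats'
+        go tvs' pats' = (reverse tvs', reverse pats')
+
+    -- etaReduceSketch ["a","b","c","d"] ["[a]","c","Int","c","d"]
+    --   == (["a","b","c"], ["[a]","c","Int","c"])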
+ + + %************************************************************************ %* * Type-checking instance declarations, pass 2 @@ -777,12 +813,11 @@ tcInstDecl2 (InstInfo { iSpec = ispec, iBinds = ibinds }) ; dfun_ev_vars <- newEvVars dfun_theta - ; (sc_binds, sc_ev_vars, sc_dfun_args) - <- tcSuperClasses dfun_id inst_tyvars dfun_ev_vars sc_theta' + ; (sc_binds, sc_ev_vars) <- tcSuperClasses dfun_id inst_tyvars dfun_ev_vars sc_theta' -- Deal with 'SPECIALISE instance' pragmas -- See Note [SPECIALISE instance pragmas] - ; spec_inst_info <- tcSpecInstPrags dfun_id ibinds + ; spec_inst_info@(spec_inst_prags,_) <- tcSpecInstPrags dfun_id ibinds -- Typecheck the methods ; (meth_ids, meth_binds) @@ -812,11 +847,10 @@ tcInstDecl2 (InstInfo { iSpec = ispec, iBinds = ibinds }) con_app_tys = wrapId (mkWpTyApps inst_tys) (dataConWrapId dict_constr) con_app_scs = mkHsWrap (mkWpEvApps (map EvId sc_ev_vars)) con_app_tys - con_app_args = foldl mk_app con_app_scs $ - map (wrapId arg_wrapper) meth_ids + con_app_args = foldl app_to_meth con_app_scs meth_ids - mk_app :: HsExpr Id -> HsExpr Id -> HsExpr Id - mk_app fun arg = HsApp (L loc fun) (L loc arg) + app_to_meth :: HsExpr Id -> Id -> HsExpr Id + app_to_meth fun meth_id = L loc fun `HsApp` L loc (wrapId arg_wrapper meth_id) inst_tv_tys = mkTyVarTys inst_tyvars arg_wrapper = mkWpEvVarApps dfun_ev_vars <.> mkWpTyApps inst_tv_tys @@ -824,19 +858,26 @@ tcInstDecl2 (InstInfo { iSpec = ispec, iBinds = ibinds }) -- Do not inline the dfun; instead give it a magic DFunFunfolding -- See Note [ClassOp/DFun selection] -- See also note [Single-method classes] - dfun_id_w_fun + (dfun_id_w_fun, dfun_spec_prags) | isNewTyCon class_tc - = dfun_id `setInlinePragma` alwaysInlinePragma { inl_sat = Just 0 } + = ( dfun_id `setInlinePragma` alwaysInlinePragma { inl_sat = Just 0 } + , SpecPrags [] ) -- Newtype dfuns just inline unconditionally, + -- so don't attempt to specialise them | otherwise - = dfun_id `setIdUnfolding` mkDFunUnfolding dfun_ty dfun_args - `setInlinePragma` dfunInlinePragma + = ( dfun_id `setIdUnfolding` mkDFunUnfolding (inst_tyvars ++ dfun_ev_vars) + dict_constr dfun_args + `setInlinePragma` dfunInlinePragma + , SpecPrags spec_inst_prags ) - dfun_args :: [DFunArg CoreExpr] - dfun_args = sc_dfun_args ++ map (DFunPolyArg . 
Var) meth_ids + dfun_args :: [CoreExpr] + dfun_args = map Type inst_tys ++ + map Var sc_ev_vars ++ + map mk_meth_app meth_ids + mk_meth_app meth_id = Var meth_id `mkTyApps` inst_tv_tys `mkVarApps` dfun_ev_vars export = ABE { abe_wrap = idHsWrapper, abe_poly = dfun_id_w_fun - , abe_mono = self_dict, abe_prags = noSpecPrags } - -- NB: noSpecPrags, see Note [SPECIALISE instance pragmas] + , abe_mono = self_dict, abe_prags = dfun_spec_prags } + -- NB: see Note [SPECIALISE instance pragmas] main_bind = AbsBinds { abs_tvs = inst_tyvars , abs_ev_vars = dfun_ev_vars , abs_exports = [export] @@ -847,13 +888,12 @@ tcInstDecl2 (InstInfo { iSpec = ispec, iBinds = ibinds }) listToBag meth_binds) } where - dfun_ty = idType dfun_id - dfun_id = instanceDFunId ispec - loc = getSrcSpan dfun_id + dfun_id = instanceDFunId ispec + loc = getSrcSpan dfun_id ------------------------------ tcSuperClasses :: DFunId -> [TcTyVar] -> [EvVar] -> TcThetaType - -> TcM (TcEvBinds, [EvVar], [DFunArg CoreExpr]) + -> TcM (TcEvBinds, [EvVar]) -- See Note [Silent superclass arguments] tcSuperClasses dfun_id inst_tyvars dfun_ev_vars sc_theta = do { -- Check that all superclasses can be deduced from @@ -862,19 +902,18 @@ tcSuperClasses dfun_id inst_tyvars dfun_ev_vars sc_theta emitWanteds ScOrigin sc_theta ; if null inst_tyvars && null dfun_ev_vars - then return (sc_binds, sc_evs, map (DFunPolyArg . Var) sc_evs) - else return (emptyTcEvBinds, sc_lam_args, sc_dfun_args) } + then return (sc_binds, sc_evs) + else return (emptyTcEvBinds, sc_lam_args) } where n_silent = dfunNSilent dfun_id - n_tv_args = length inst_tyvars orig_ev_vars = drop n_silent dfun_ev_vars - (sc_lam_args, sc_dfun_args) = unzip (map (find n_tv_args dfun_ev_vars) sc_theta) - find _ [] pred + sc_lam_args = map (find dfun_ev_vars) sc_theta + find [] pred = pprPanic "tcInstDecl2" (ppr dfun_id $$ ppr (idType dfun_id) $$ ppr pred) - find i (ev:evs) pred - | pred `eqPred` evVarPred ev = (ev, DFunLamArg i) - | otherwise = find (i+1) evs pred + find (ev:evs) pred + | pred `eqPred` evVarPred ev = ev + | otherwise = find evs pred ---------------------- mkMethIds :: HsSigFun -> Class -> [TcTyVar] -> [EvVar] @@ -1037,35 +1076,56 @@ Consider {-# SPECIALISE instance Ix (Int,Int) #-} range (x,y) = ... -We do *not* want to make a specialised version of the dictionary -function. Rather, we want specialised versions of each *method*. -Thus we should generate something like this: +We make a specialised version of the dictionary function, AND +specialised versions of each *method*. Thus we should generate +something like this: $dfIxPair :: (Ix a, Ix b) => Ix (a,b) - {- DFUN [$crangePair, ...] -} + {-# DFUN [$crangePair, ...] #-} + {-# SPECIALISE $dfIxPair :: Ix (Int,Int) #-} $dfIxPair da db = Ix ($crangePair da db) (...other methods...) $crange :: (Ix a, Ix b) -> ((a,b),(a,b)) -> [(a,b)] {-# SPECIALISE $crange :: ((Int,Int),(Int,Int)) -> [(Int,Int)] #-} $crange da db = - {-# RULE range ($dfIx da db) = $crange da db #-} +The SPECIALISE pragmas are acted upon by the desugarer, which generate + dii :: Ix Int + dii = ... + + $s$dfIxPair :: Ix ((Int,Int),(Int,Int)) + {-# DFUN [$crangePair di di, ...] #-} + $s$dfIxPair = Ix ($crangePair di di) (...) + + {-# RULE forall (d1,d2:Ix Int). $dfIxPair Int Int d1 d2 = $s$dfIxPair #-} + + $s$crangePair :: ((Int,Int),(Int,Int)) -> [(Int,Int)] + $c$crangePair = ...specialised RHS of $crangePair... + + {-# RULE forall (d1,d2:Ix Int). $crangePair Int Int d1 d2 = $s$crangePair #-} + Note that - * The RULE is unaffected by the specialisation. 
We don't want to - specialise $dfIx, because then it would need a specialised RULE - which is a pain. The single RULE works fine at all specialisations. - See Note [How instance declarations are translated] above + * The specialised dictionary $s$dfIxPair is very much needed, in case we + call a function that takes a dictionary, but in a context where the + specialised dictionary can be used. See Trac #7797. - * Instead, we want to specialise the *method*, $crange + * The ClassOp rule for 'range' works equally well on $s$dfIxPair, because + it still has a DFunUnfolding. See Note [ClassOp/DFun selection] + + * A call (range ($dfIxPair Int Int d1 d2)) might simplify two ways: + --> {ClassOp rule for range} $crangePair Int Int d1 d2 + --> {SPEC rule for $crangePair} $s$crangePair + or thus: + --> {SPEC rule for $dfIxPair} range $s$dfIxPair + --> {ClassOpRule for range} $s$crangePair + It doesn't matter which way. + + * We want to specialise the RHS of both $dfIxPair and $crangePair, + but the SAME HsWrapper will do for both! We can call tcSpecPrag + just once, and pass the result (in spec_inst_info) to tcInstanceMethods. -In practice, rather than faking up a SPECIALISE pragama for each -method (which is painful, since we'd have to figure out its -specialised type), we call tcSpecPrag *as if* were going to specialise -$dfIx -- you can see that in the call to tcSpecInst. That generates a -SpecPrag which, as it turns out, can be used unchanged for each method. -The "it turns out" bit is delicate, but it works fine! \begin{code} tcSpecInst :: Id -> Sig Name -> TcM TcSpecPrag @@ -1418,7 +1478,6 @@ That is, just as if you'd written So for the above example we generate: - {-# INLINE $dmop1 #-} -- $dmop1 has an InlineCompulsory unfolding $dmop1 d b x = op2 d (not b) x diff --git a/compiler/typecheck/TcInteract.lhs b/compiler/typecheck/TcInteract.lhs index 653f343f206e..27cf52e85e79 100644 --- a/compiler/typecheck/TcInteract.lhs +++ b/compiler/typecheck/TcInteract.lhs @@ -242,9 +242,14 @@ spontaneousSolveStage workItem = do { mb_solved <- trySpontaneousSolve workItem ; case mb_solved of SPCantSolve - | CTyEqCan { cc_tyvar = tv, cc_ev = fl } <- workItem + | CTyEqCan { cc_tyvar = tv, cc_rhs = rhs, cc_ev = fl } <- workItem -- Unsolved equality - -> do { n_kicked <- kickOutRewritable (ctEvFlavour fl) tv + -> do { untch <- getUntouchables + ; traceTcS "Can't solve tyvar equality" + (vcat [ text "LHS:" <+> ppr tv <+> dcolon <+> ppr (tyVarKind tv) + , text "RHS:" <+> ppr rhs <+> dcolon <+> ppr (typeKind rhs) + , text "Untouchables =" <+> ppr untch ]) + ; n_kicked <- kickOutRewritable (ctEvFlavour fl) tv ; traceFireTcS workItem $ ptext (sLit "Kept as inert") <+> ppr_kicked n_kicked <> colon <+> ppr workItem @@ -403,7 +408,8 @@ trySpontaneousSolve :: WorkItem -> TcS SPSolveResult trySpontaneousSolve workItem@(CTyEqCan { cc_ev = gw , cc_tyvar = tv1, cc_rhs = xi, cc_loc = d }) | isGiven gw - = return SPCantSolve + = do { traceTcS "No spontaneous solve for given" (ppr workItem) + ; return SPCantSolve } | Just tv2 <- tcGetTyVar_maybe xi = do { tch1 <- isTouchableMetaTyVarTcS tv1 ; tch2 <- isTouchableMetaTyVarTcS tv2 @@ -411,21 +417,17 @@ trySpontaneousSolve workItem@(CTyEqCan { cc_ev = gw (True, True) -> trySpontaneousEqTwoWay d gw tv1 tv2 (True, False) -> trySpontaneousEqOneWay d gw tv1 xi (False, True) -> trySpontaneousEqOneWay d gw tv2 (mkTyVarTy tv1) - _ -> return SPCantSolve } + _ -> return SPCantSolve } | otherwise = do { tch1 <- isTouchableMetaTyVarTcS tv1 ; if tch1 then trySpontaneousEqOneWay d gw tv1 xi - 
else do { untch <- getUntouchables - ; traceTcS "Untouchable LHS, can't spontaneously solve workitem" $ - vcat [text "Untouchables =" <+> ppr untch - , text "Workitem =" <+> ppr workItem ] - ; return SPCantSolve } - } + else return SPCantSolve } -- No need for -- trySpontaneousSolve (CFunEqCan ...) = ... -- See Note [No touchables as FunEq RHS] in TcSMonad -trySpontaneousSolve _ = return SPCantSolve +trySpontaneousSolve item = do { traceTcS "Spont: no tyvar on lhs" (ppr item) + ; return SPCantSolve } ---------------- trySpontaneousEqOneWay :: CtLoc -> CtEvidence @@ -456,57 +458,6 @@ trySpontaneousEqTwoWay d gw tv1 tv2 nicer_to_update_tv2 = isSigTyVar tv1 || isSystemName (Var.varName tv2) \end{code} -Note [Kind errors] -~~~~~~~~~~~~~~~~~~ -Consider the wanted problem: - alpha ~ (# Int, Int #) -where alpha :: ArgKind and (# Int, Int #) :: (#). We can't spontaneously solve this constraint, -but we should rather reject the program that give rise to it. If 'trySpontaneousEqTwoWay' -simply returns @CantSolve@ then that wanted constraint is going to propagate all the way and -get quantified over in inference mode. That's bad because we do know at this point that the -constraint is insoluble. Instead, we call 'recKindErrorTcS' here, which will fail later on. - -The same applies in canonicalization code in case of kind errors in the givens. - -However, when we canonicalize givens we only check for compatibility (@compatKind@). -If there were a kind error in the givens, this means some form of inconsistency or dead code. - -You may think that when we spontaneously solve wanteds we may have to look through the -bindings to determine the right kind of the RHS type. E.g one may be worried that xi is -@alpha@ where alpha :: ? and a previous spontaneous solving has set (alpha := f) with (f :: *). -But we orient our constraints so that spontaneously solved ones can rewrite all other constraint -so this situation can't happen. - -Note [Spontaneous solving and kind compatibility] -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -Note that our canonical constraints insist that *all* equalities (tv ~ -xi) or (F xis ~ rhs) require the LHS and the RHS to have *compatible* -the same kinds. ("compatible" means one is a subKind of the other.) - - - It can't be *equal* kinds, because - b) wanted constraints don't necessarily have identical kinds - eg alpha::? ~ Int - b) a solved wanted constraint becomes a given - - - SPJ thinks that *given* constraints (tv ~ tau) always have that - tau has a sub-kind of tv; and when solving wanted constraints - in trySpontaneousEqTwoWay we re-orient to achieve this. - - - Note that the kind invariant is maintained by rewriting. - Eg wanted1 rewrites wanted2; if both were compatible kinds before, - wanted2 will be afterwards. Similarly givens. - -Caveat: - - Givens from higher-rank, such as: - type family T b :: * -> * -> * - type instance T Bool = (->) - - f :: forall a. ((T a ~ (->)) => ...) -> a -> ... - flop = f (...) True - Whereas we would be able to apply the type instance, we would not be able to - use the given (T Bool ~ (->)) in the body of 'flop' - - Note [Avoid double unifications] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The spontaneous solver has to return a given which mentions the unified unification @@ -526,8 +477,6 @@ double unifications is the main reason we disallow touchable unification variables as RHS of type family equations: F xis ~ alpha. 
\begin{code} ----------------- - solveWithIdentity :: CtLoc -> CtEvidence -> TcTyVar -> Xi -> TcS SPSolveResult -- Solve with the identity coercion -- Precondition: kind(xi) is a sub-kind of kind(tv) @@ -1466,7 +1415,7 @@ doTopReactDict inerts fl cls xis loc doTopReactFunEq :: Ct -> CtEvidence -> TyCon -> [Xi] -> Xi -> CtLoc -> TcS TopInteractResult doTopReactFunEq _ct fl fun_tc args xi loc - = ASSERT (isSynFamilyTyCon fun_tc) -- No associated data families have + = ASSERT(isSynFamilyTyCon fun_tc) -- No associated data families have -- reached this far -- Look in the cache of solved funeqs do { fun_eq_cache <- getTcSInerts >>= (return . inert_solved_funeqs) diff --git a/compiler/typecheck/TcMType.lhs b/compiler/typecheck/TcMType.lhs index cce760db5d76..3034df248a91 100644 --- a/compiler/typecheck/TcMType.lhs +++ b/compiler/typecheck/TcMType.lhs @@ -53,7 +53,7 @@ module TcMType ( skolemiseSigTv, skolemiseUnboundMetaTyVar, zonkTcTyVar, zonkTcTyVars, zonkTyVarsAndFV, zonkTcTypeAndFV, zonkQuantifiedTyVar, quantifyTyVars, - zonkTcType, zonkTcTypes, zonkTcThetaType, + zonkTcTyVarBndr, zonkTcType, zonkTcTypes, zonkTcThetaType, zonkTcKind, defaultKindVarToStar, zonkEvVar, zonkWC, zonkId, zonkCt, zonkCts, zonkSkolemInfo, @@ -532,7 +532,7 @@ quantifyTyVars gbl_tvs tkvs else do { let (meta_kvs, skolem_kvs) = partition is_meta kvs2 is_meta kv = isTcTyVar kv && isMetaTyVar kv ; mapM_ defaultKindVarToStar meta_kvs - ; WARN ( not (null skolem_kvs), ppr skolem_kvs ) + ; WARN( not (null skolem_kvs), ppr skolem_kvs ) return skolem_kvs } -- Should be empty ; mapM zonk_quant (qkvs ++ qtvs) } @@ -582,7 +582,7 @@ zonkQuantifiedTyVar tv defaultKindVarToStar :: TcTyVar -> TcM Kind -- We have a meta-kind: unify it with '*' defaultKindVarToStar kv - = do { ASSERT ( isKindVar kv && isMetaTyVar kv ) + = do { ASSERT( isKindVar kv && isMetaTyVar kv ) writeMetaTyVar kv liftedTypeKind ; return liftedTypeKind } @@ -932,6 +932,9 @@ zonkTcType ty where go (TyConApp tc tys) = do tys' <- mapM go tys return (TyConApp tc tys') + -- Do NOT establish Type invariants, because + -- doing so is strict in the TyCOn. + -- See Note [Zonking inside the knot] in TcHsType go (LitTy n) = return (LitTy n) @@ -945,6 +948,9 @@ zonkTcType ty -- NB the mkAppTy; we might have instantiated a -- type variable to a type constructor, so we need -- to pull the TyConApp to the top. + -- OK to do this because only strict in the structure + -- not in the TyCon. + -- See Note [Zonking inside the knot] in TcHsType -- The two interesting cases! go (TyVarTy tyvar) | isTcTyVar tyvar = zonkTcTyVar tyvar diff --git a/compiler/typecheck/TcPat.lhs b/compiler/typecheck/TcPat.lhs index fd9acee346a7..cb49d4de2f26 100644 --- a/compiler/typecheck/TcPat.lhs +++ b/compiler/typecheck/TcPat.lhs @@ -685,6 +685,7 @@ tcConPat penv (L con_span con_name) pat_ty arg_pats thing_inside arg_tys' = substTys tenv arg_tys + ; traceTc "tcConPat" (ppr con_name $$ ppr ex_tvs' $$ ppr pat_ty' $$ ppr arg_tys') ; if null ex_tvs && null eq_spec && null theta then do { -- The common case; no class bindings etc -- (see Note [Arrows and patterns]) diff --git a/compiler/typecheck/TcRnTypes.lhs b/compiler/typecheck/TcRnTypes.lhs index ad7b12a001e3..43b4f36aa20c 100644 --- a/compiler/typecheck/TcRnTypes.lhs +++ b/compiler/typecheck/TcRnTypes.lhs @@ -885,7 +885,8 @@ data Ct cc_ev :: CtEvidence, -- See Note [Ct/evidence invariant] -- The ctev_pred of the evidence is -- of form (tv xi1 xi2 ... 
xin)
- -- or (t1 ~ t2) where not (kind(t1) `compatKind` kind(t2)
+ -- or (tv1 ~ ty2) where the CTyEqCan kind invariant fails
+ -- or (F tys ~ ty) where the CFunEqCan kind invariant fails
-- See Note [CIrredEvCan constraints]
cc_loc :: CtLoc
}
@@ -893,8 +894,8 @@ data Ct
| CTyEqCan { -- tv ~ xi (recall xi means function free)
-- Invariant:
-- * tv not in tvs(xi) (occurs check)
- -- * typeKind xi `compatKind` typeKind tv
- -- See Note [Spontaneous solving and kind compatibility]
+ -- * typeKind xi `subKind` typeKind tv
+ -- See Note [Kind orientation for CTyEqCan]
-- * We prefer unification variables on the left *JUST* for efficiency
cc_ev :: CtEvidence, -- See Note [Ct/evidence invariant]
cc_tyvar :: TcTyVar,
@@ -904,7 +905,8 @@ data Ct
| CFunEqCan { -- F xis ~ xi
-- Invariant: * isSynFamilyTyCon cc_fun
- -- * typeKind (F xis) `compatKind` typeKind xi
+ -- * typeKind (F xis) `subKind` typeKind xi
+ -- See Note [Kind orientation for CFunEqCan]
cc_ev :: CtEvidence, -- See Note [Ct/evidence invariant]
cc_fun :: TyCon, -- A type function
cc_tyargs :: [Xi], -- Either under-saturated or exactly saturated
@@ -926,6 +928,49 @@ data Ct
}
\end{code}
+Note [Kind orientation for CTyEqCan]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+Given an equality (t:* ~ s:Open), we absolutely want to re-orient it.
+We can't solve it by updating t:=s, regardless of how touchable 't' is,
+because the kinds don't work. Indeed we don't want to leave it with
+the orientation (t ~ s), because if that gets into the inert set we'll
+start replacing t's by s's, and that too is the wrong way round.
+
+Hence in a CTyEqCan, (t:k1 ~ xi:k2) we require that k2 is a subkind of k1.
+
+If the two have incompatible kinds, we just don't use a CTyEqCan at all.
+See Note [Equalities with incompatible kinds] in TcCanonical
+
+We can't require *equal* kinds, because
+ * wanted constraints don't necessarily have identical kinds
+ eg alpha::? ~ Int
+ * a solved wanted constraint becomes a given
+
+Note [Kind orientation for CFunEqCan]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+For (F xis ~ rhs) we require that kind(rhs) is a subkind of kind(lhs).
+This really only matters when rhs is an Open type variable (since only type
+variables have Open kinds):
+ F ty ~ (a:Open)
+which can happen, say, from
+ f :: F a b
+ f = undefined -- The a:Open comes from instantiating 'undefined'
+
+Note that the kind invariant is maintained by rewriting.
+Eg wanted1 rewrites wanted2; if both were compatible kinds before,
+ wanted2 will be afterwards. Similarly givens.
+
+Caveat:
+ - Givens from higher-rank, such as:
+ type family T b :: * -> * -> *
+ type instance T Bool = (->)
+
+ f :: forall a. ((T a ~ (->)) => ...) -> a -> ...
+ flop = f (...)
True + Whereas we would be able to apply the type instance, we would not be able to + use the given (T Bool ~ (->)) in the body of 'flop' + + Note [CIrredEvCan constraints] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CIrredEvCan constraints are used for constraints that are "stuck" diff --git a/compiler/typecheck/TcSMonad.lhs b/compiler/typecheck/TcSMonad.lhs index f215ac3dfeff..9a7049fccaef 100644 --- a/compiler/typecheck/TcSMonad.lhs +++ b/compiler/typecheck/TcSMonad.lhs @@ -81,8 +81,6 @@ module TcSMonad ( newFlexiTcSTy, instFlexiTcS, instFlexiTcSHelperTcS, cloneMetaTyVar, - compatKind, mkKindErrorCtxtTcS, - Untouchables, isTouchableMetaTyVarTcS, isFilledMetaTyVar_maybe, zonkTyVarsAndFV, @@ -110,7 +108,6 @@ import qualified TcRnMonad as TcM import qualified TcMType as TcM import qualified TcEnv as TcM ( checkWellStaged, topIdLvl, tcGetDefaultTys ) -import {-# SOURCE #-} qualified TcUnify as TcM ( mkKindErrorCtxt ) import Kind import TcType import DynFlags @@ -148,19 +145,6 @@ import Digraph #endif \end{code} - -\begin{code} -compatKind :: Kind -> Kind -> Bool -compatKind k1 k2 = k1 `tcIsSubKind` k2 || k2 `tcIsSubKind` k1 - -mkKindErrorCtxtTcS :: Type -> Kind - -> Type -> Kind - -> ErrCtxt -mkKindErrorCtxtTcS ty1 ki1 ty2 ki2 - = (False,TcM.mkKindErrorCtxt ty1 ty2 ki1 ki2) - -\end{code} - %************************************************************************ %* * %* Worklists * diff --git a/compiler/typecheck/TcSimplify.lhs b/compiler/typecheck/TcSimplify.lhs index 2cbb5af78aaa..b17f3950a449 100644 --- a/compiler/typecheck/TcSimplify.lhs +++ b/compiler/typecheck/TcSimplify.lhs @@ -23,6 +23,7 @@ import TcMType as TcM import TcType import TcSMonad as TcS import TcInteract +import Kind ( defaultKind_maybe ) import Inst import FunDeps ( growThetaTyVars ) import Type ( classifyPredType, PredTree(..), getClassPredTys_maybe ) @@ -76,34 +77,53 @@ simplifyTop wanteds simpl_top :: WantedConstraints -> TcS WantedConstraints simpl_top wanteds = do { wc_first_go <- nestTcS (solve_wanteds_and_drop wanteds) - ; free_tvs <- TcS.zonkTyVarsAndFV (tyVarsOfWC wc_first_go) - ; let meta_tvs = filterVarSet isMetaTyVar free_tvs + -- This is where the main work happens + ; try_tyvar_defaulting wc_first_go } + + try_tyvar_defaulting :: WantedConstraints -> TcS WantedConstraints + try_tyvar_defaulting wc + | isEmptyWC wc + = return wc + | otherwise + = do { free_tvs <- TcS.zonkTyVarsAndFV (tyVarsOfWC wc) + ; let meta_tvs = varSetElems (filterVarSet isMetaTyVar free_tvs) -- zonkTyVarsAndFV: the wc_first_go is not yet zonked -- filter isMetaTyVar: we might have runtime-skolems in GHCi, -- and we definitely don't want to try to assign to those! 
- ; mapM_ defaultTyVar (varSetElems meta_tvs) -- Has unification side effects - ; simpl_top_loop wc_first_go } + ; meta_tvs' <- mapM defaultTyVar meta_tvs -- Has unification side effects + ; if meta_tvs' == meta_tvs -- No defaulting took place; + -- (defaulting returns fresh vars) + then try_class_defaulting wc + else do { wc_residual <- nestTcS (solve_wanteds_and_drop wc) + -- See Note [Must simplify after defaulting] + ; try_class_defaulting wc_residual } } - simpl_top_loop wc + try_class_defaulting :: WantedConstraints -> TcS WantedConstraints + try_class_defaulting wc | isEmptyWC wc || insolubleWC wc - -- Don't do type-class defaulting if there are insolubles - -- Doing so is not going to solve the insolubles - = return wc + = return wc -- Don't do type-class defaulting if there are insolubles + -- Doing so is not going to solve the insolubles | otherwise - = do { wc_residual <- nestTcS (solve_wanteds_and_drop wc) - ; let wc_flat_approximate = approximateWC wc_residual - ; something_happened <- applyDefaultingRules wc_flat_approximate - -- See Note [Top-level Defaulting Plan] - ; if something_happened then - simpl_top_loop wc_residual - else - return wc_residual } + = do { something_happened <- applyDefaultingRules (approximateWC wc) + -- See Note [Top-level Defaulting Plan] + ; if something_happened + then do { wc_residual <- nestTcS (solve_wanteds_and_drop wc) + ; try_class_defaulting wc_residual } + else return wc } \end{code} +Note [Must simplify after defaulting] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +We may have a deeply buried constraint + (t:*) ~ (a:Open) +which we couldn't solve because of the kind incompatibility, and 'a' is free. +Then when we default 'a' we can solve the constraint. And we want to do +that before starting in on type classes. We MUST do it before reporting +errors, because it isn't an error! Trac #7967 was due to this. + Note [Top-level Defaulting Plan] ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - We have considered two design choices for where/when to apply defaulting. (i) Do it in SimplCheck mode only /whenever/ you try to solve some flat constraints, maybe deep inside the context of implications. @@ -782,7 +802,7 @@ defaultTyVar :: TcTyVar -> TcS TcTyVar -- Precondition: MetaTyVars only -- See Note [DefaultTyVar] defaultTyVar the_tv - | not (k `eqKind` default_k) + | Just default_k <- defaultKind_maybe (tyVarKind the_tv) = do { tv' <- TcS.cloneMetaTyVar the_tv ; let new_tv = setTyVarKind tv' default_k ; traceTcS "defaultTyVar" (ppr the_tv <+> ppr new_tv) @@ -793,9 +813,6 @@ defaultTyVar the_tv -- We keep the same Untouchables on tv' | otherwise = return the_tv -- The common case - where - k = tyVarKind the_tv - default_k = defaultKind k approximateWC :: WantedConstraints -> Cts -- Postcondition: Wanted or Derived Cts @@ -1150,7 +1167,7 @@ disambigGroup (default_ty:default_tys) group -- discard all side effects from the attempt do { setWantedTyBind the_tv default_ty ; implics_from_defaulting <- solveInteract wanteds - ; MASSERT (isEmptyBag implics_from_defaulting) + ; MASSERT(isEmptyBag implics_from_defaulting) -- I am not certain if any implications can be generated -- but I am letting this fail aggressively if this ever happens. 
diff --git a/compiler/typecheck/TcTyClsDecls.lhs b/compiler/typecheck/TcTyClsDecls.lhs index 67e322834811..044086d93785 100644 --- a/compiler/typecheck/TcTyClsDecls.lhs +++ b/compiler/typecheck/TcTyClsDecls.lhs @@ -1033,43 +1033,41 @@ consUseH98Syntax _ = True ----------------------------------- tcConDecls :: NewOrData -> TyCon -> ([TyVar], Type) -> [LConDecl Name] -> TcM [DataCon] -tcConDecls new_or_data rep_tycon res_tmpl cons - = mapM (addLocM (tcConDecl new_or_data rep_tycon res_tmpl)) cons +tcConDecls new_or_data rep_tycon (tmpl_tvs, res_tmpl) cons + = mapM (addLocM $ tcConDecl new_or_data rep_tycon tmpl_tvs res_tmpl) cons tcConDecl :: NewOrData - -> TyCon -- Representation tycon - -> ([TyVar], Type) -- Return type template (with its template tyvars) - -- (tvs, T tys), where T is the family TyCon + -> TyCon -- Representation tycon + -> [TyVar] -> Type -- Return type template (with its template tyvars) + -- (tvs, T tys), where T is the family TyCon -> ConDecl Name -> TcM DataCon -tcConDecl new_or_data rep_tycon res_tmpl -- Data types +tcConDecl new_or_data rep_tycon tmpl_tvs res_tmpl -- Data types (ConDecl { con_name = name , con_qvars = hs_tvs, con_cxt = hs_ctxt , con_details = hs_details, con_res = hs_res_ty }) = addErrCtxt (dataConCtxt name) $ do { traceTc "tcConDecl 1" (ppr name) - ; (tvs, ctxt, arg_tys, res_ty, is_infix, field_lbls, stricts) - <- tcHsTyVarBndrs hs_tvs $ \ tvs -> + ; (ctxt, arg_tys, res_ty, is_infix, field_lbls, stricts) + <- tcHsTyVarBndrs hs_tvs $ \ _ -> do { ctxt <- tcHsContext hs_ctxt ; details <- tcConArgs new_or_data hs_details ; res_ty <- tcConRes hs_res_ty ; let (is_infix, field_lbls, btys) = details (arg_tys, stricts) = unzip btys - ; return (tvs, ctxt, arg_tys, res_ty, is_infix, field_lbls, stricts) } + ; return (ctxt, arg_tys, res_ty, is_infix, field_lbls, stricts) } -- Generalise the kind variables (returning quantifed TcKindVars) -- and quantify the type variables (substituting their kinds) - -- REMEMBER: 'tvs' and 'tkvs' are: + -- REMEMBER: 'tkvs' are: -- ResTyH98: the *existential* type variables only -- ResTyGADT: *all* the quantified type variables -- c.f. the comment on con_qvars in HsDecls - ; tkvs <- case (res_ty, res_tmpl) of - (ResTyH98, (tvs, _)) -> quantifyTyVars (mkVarSet tvs) (tyVarsOfTypes arg_tys) - (ResTyGADT ty, _) -> quantifyTyVars emptyVarSet (tyVarsOfTypes (ty:arg_tys)) + ; tkvs <- case res_ty of + ResTyH98 -> quantifyTyVars (mkVarSet tmpl_tvs) (tyVarsOfTypes (ctxt++arg_tys)) + ResTyGADT res_ty -> quantifyTyVars emptyVarSet (tyVarsOfTypes (res_ty:ctxt++arg_tys)) - ; traceTc "tcConDecl" (ppr name $$ ppr arg_tys $$ ppr tvs $$ ppr tkvs) - -- Zonk to Types ; (ze, qtkvs) <- zonkTyBndrsX emptyZonkEnv tkvs ; arg_tys <- zonkTcTypeToTypes ze arg_tys @@ -1078,9 +1076,8 @@ tcConDecl new_or_data rep_tycon res_tmpl -- Data types ResTyH98 -> return ResTyH98 ResTyGADT ty -> ResTyGADT <$> zonkTcTypeToType ze ty - ; let (univ_tvs, ex_tvs, eq_preds, res_ty') = rejigConRes res_tmpl qtkvs res_ty + ; let (univ_tvs, ex_tvs, eq_preds, res_ty') = rejigConRes tmpl_tvs res_tmpl qtkvs res_ty - ; traceTc "tcConDecl 3" (vcat [ppr name, ppr tkvs, ppr qtkvs, ppr univ_tvs, ppr ex_tvs]) ; fam_envs <- tcGetFamInstEnvs ; buildDataCon fam_envs (unLoc name) is_infix stricts field_lbls @@ -1127,7 +1124,7 @@ tcConRes (ResTyGADT res_ty) = do { res_ty' <- tcHsLiftedType res_ty -- TI :: forall b1 c1. (b1 ~ c1) => b1 -> :R7T b1 c1 -- In this case orig_res_ty = T (e,e) -rejigConRes :: ([TyVar], Type) -- Template for result type; e.g. 
+rejigConRes :: [TyVar] -> Type -- Template for result type; e.g. -- data instance T [a] b c = ... -- gives template ([a,b,c], T [a] b c) -> [TyVar] -- where MkT :: forall x y z. ... @@ -1140,13 +1137,13 @@ rejigConRes :: ([TyVar], Type) -- Template for result type; e.g. -- the same as the parent tycon, because we are in the middle -- of a recursive knot; so it's postponed until checkValidDataCon -rejigConRes (tmpl_tvs, res_ty) dc_tvs ResTyH98 +rejigConRes tmpl_tvs res_ty dc_tvs ResTyH98 = (tmpl_tvs, dc_tvs, [], res_ty) -- In H98 syntax the dc_tvs are the existential ones -- data T a b c = forall d e. MkT ... -- The {a,b,c} are tc_tvs, and {d,e} are dc_tvs -rejigConRes (tmpl_tvs, res_tmpl) dc_tvs (ResTyGADT res_ty) +rejigConRes tmpl_tvs res_tmpl dc_tvs (ResTyGADT res_ty) -- E.g. data T [a] b c where -- MkT :: forall x y z. T [(x,y)] z z -- Then we generate diff --git a/compiler/typecheck/TcUnify.lhs b/compiler/typecheck/TcUnify.lhs index 4749d0c0b134..b3a4743939eb 100644 --- a/compiler/typecheck/TcUnify.lhs +++ b/compiler/typecheck/TcUnify.lhs @@ -31,11 +31,7 @@ module TcUnify ( matchExpectedAppTy, matchExpectedFunTys, matchExpectedFunKind, - wrapFunResCoercion, - - -------------------------------- - -- Errors - mkKindErrorCtxt + wrapFunResCoercion ) where @@ -1166,7 +1162,7 @@ unifyKindEq (FunTy a1 r1) (FunTy a2 r2) unifyKindEq (TyConApp kc1 k1s) (TyConApp kc2 k2s) | kc1 == kc2 - = ASSERT (length k1s == length k2s) + = ASSERT(length k1s == length k2s) -- Should succeed since the kind constructors are the same, -- and the kinds are sort-checked, thus fully applied do { mb_eqs <- zipWithM unifyKindEq k1s k2s @@ -1200,19 +1196,4 @@ uUnboundKVar kv1 non_var_k2 ; case occurCheckExpand dflags kv1 k2b of OC_OK k2c -> do { writeMetaTyVar kv1 k2c; return (Just EQ) } _ -> return Nothing } - -mkKindErrorCtxt :: Type -> Type -> Kind -> Kind -> TidyEnv -> TcM (TidyEnv, SDoc) -mkKindErrorCtxt ty1 ty2 k1 k2 env0 - = let (env1, ty1') = tidyOpenType env0 ty1 - (env2, ty2') = tidyOpenType env1 ty2 - (env3, k1' ) = tidyOpenKind env2 k1 - (env4, k2' ) = tidyOpenKind env3 k2 - in do ty1 <- zonkTcType ty1' - ty2 <- zonkTcType ty2' - k1 <- zonkTcKind k1' - k2 <- zonkTcKind k2' - return (env4, - vcat [ ptext (sLit "Kind incompatibility when matching types xx:") - , nest 2 (vcat [ ppr ty1 <+> dcolon <+> ppr k1 - , ppr ty2 <+> dcolon <+> ppr k2 ]) ]) \end{code} diff --git a/compiler/typecheck/TcUnify.lhs-boot b/compiler/typecheck/TcUnify.lhs-boot index aa9353670588..35a7155a08ee 100644 --- a/compiler/typecheck/TcUnify.lhs-boot +++ b/compiler/typecheck/TcUnify.lhs-boot @@ -1,14 +1,11 @@ \begin{code} module TcUnify where -import TcType ( TcTauType, Type, Kind ) -import VarEnv ( TidyEnv ) +import TcType ( TcTauType ) import TcRnTypes ( TcM ) import TcEvidence ( TcCoercion ) -import Outputable ( SDoc ) -- This boot file exists only to tie the knot between -- TcUnify and Inst unifyType :: TcTauType -> TcTauType -> TcM TcCoercion -mkKindErrorCtxt :: Type -> Type -> Kind -> Kind -> TidyEnv -> TcM (TidyEnv, SDoc) \end{code} diff --git a/compiler/typecheck/TcValidity.lhs b/compiler/typecheck/TcValidity.lhs index 5846e28ba413..968d8695ccfe 100644 --- a/compiler/typecheck/TcValidity.lhs +++ b/compiler/typecheck/TcValidity.lhs @@ -306,7 +306,7 @@ check_syn_tc_app ctxt rank ty tc tys ; liberal <- xoptM Opt_LiberalTypeSynonyms ; if not liberal || isSynFamilyTyCon tc then -- For H98 and synonym families, do check the type args - mapM_ (check_mono_type ctxt synArgMonoType) tys + mapM_ check_arg tys else -- In the liberal case 
(only for closed syns), expand then check case tcView ty of @@ -315,13 +315,15 @@ check_syn_tc_app ctxt rank ty tc tys | GhciCtxt <- ctxt -- Accept under-saturated type synonyms in -- GHCi :kind commands; see Trac #7586 - = mapM_ (check_mono_type ctxt synArgMonoType) tys + = mapM_ check_arg tys | otherwise = failWithTc (arityErr "Type synonym" (tyConName tc) tc_arity n_args) where n_args = length tys tc_arity = tyConArity tc + check_arg | isSynFamilyTyCon tc = check_arg_type ctxt rank + | otherwise = check_mono_type ctxt synArgMonoType ---------------------------------------- check_ubx_tuple :: UserTypeCtxt -> KindOrType diff --git a/compiler/types/Coercion.lhs b/compiler/types/Coercion.lhs index 074f61f9ba81..e1dec4938039 100644 --- a/compiler/types/Coercion.lhs +++ b/compiler/types/Coercion.lhs @@ -93,7 +93,6 @@ import VarEnv import VarSet import Maybes ( orElse ) import Name ( Name, NamedThing(..), nameUnique, nameModule, getSrcSpan ) -import NameSet import OccName ( parenSymOcc ) import Util import BasicTypes @@ -583,7 +582,7 @@ splitForAllCo_maybe _ = Nothing coVarKind :: CoVar -> (Type,Type) coVarKind cv | Just (tc, [_kind,ty1,ty2]) <- splitTyConApp_maybe (varType cv) - = ASSERT (tc `hasKey` eqPrimTyConKey) + = ASSERT(tc `hasKey` eqPrimTyConKey) (ty1,ty2) | otherwise = panic "coVarKind, non coercion variable" @@ -693,7 +692,7 @@ mkFunCo co1 co2 = mkTyConAppCo funTyCon [co1, co2] mkForAllCo :: Var -> Coercion -> Coercion -- note that a TyVar should be used here, not a CoVar (nor a TcTyVar) mkForAllCo tv (Refl ty) = ASSERT( isTyVar tv ) Refl (mkForAllTy tv ty) -mkForAllCo tv co = ASSERT ( isTyVar tv ) ForAllCo tv co +mkForAllCo tv co = ASSERT( isTyVar tv ) ForAllCo tv co ------------------------------- @@ -830,25 +829,21 @@ splitNewTypeRepCo_maybe _ topNormaliseNewType :: Type -> Maybe (Type, Coercion) topNormaliseNewType ty - = case topNormaliseNewTypeX emptyNameSet ty of + = case topNormaliseNewTypeX initRecTc ty of Just (_, co, ty) -> Just (ty, co) Nothing -> Nothing -topNormaliseNewTypeX :: NameSet -> Type -> Maybe (NameSet, Coercion, Type) +topNormaliseNewTypeX :: RecTcChecker -> Type -> Maybe (RecTcChecker, Coercion, Type) topNormaliseNewTypeX rec_nts ty | Just ty' <- coreView ty -- Expand predicates and synonyms = topNormaliseNewTypeX rec_nts ty' topNormaliseNewTypeX rec_nts (TyConApp tc tys) - | Just (rep_ty, co) <- instNewTyCon_maybe tc tys - , not (tc_name `elemNameSet` rec_nts) -- See Note [Expanding newtypes] in Type + | Just rec_nts' <- checkRecTc rec_nts tc -- See Note [Expanding newtypes] in TyCon + , Just (rep_ty, co) <- instNewTyCon_maybe tc tys = case topNormaliseNewTypeX rec_nts' rep_ty of Nothing -> Just (rec_nts', co, rep_ty) Just (rec_nts', co', rep_ty') -> Just (rec_nts', co `mkTransCo` co', rep_ty') - where - tc_name = tyConName tc - rec_nts' | isRecursiveTyCon tc = addOneToNameSet rec_nts tc_name - | otherwise = rec_nts topNormaliseNewTypeX _ _ = Nothing \end{code} @@ -1066,7 +1061,53 @@ lookupCoVar (CvSubst _ _ cenv) v = lookupVarEnv cenv v %* * %************************************************************************ +Note [Lifting coercions over types: liftCoSubst] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +The KPUSH rule deals with this situation + data T a = MkK (a -> Maybe a) + g :: T t1 ~ K t2 + x :: t1 -> Maybe t1 + + case (K @t1 x) |> g of + K (y:t2 -> Maybe t2) -> rhs + +We want to push the coercion inside the constructor application. 
+So we do this + + g' :: t1~t2 = Nth 0 g + + case K @t2 (x |> g' -> Maybe g') of + K (y:t2 -> Maybe t2) -> rhs + +The crucial operation is that we + * take the type of K's argument: a -> Maybe a + * and substitute g' for a +thus giving *coercion*. This is what liftCoSubst does. + +Note [Substituting kinds in liftCoSubst] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +We need to take care with kind polymorphism. Suppose + K :: forall k (a:k). (forall b:k. a -> b) -> T k a + +Now given (K @kk1 @ty1 v) |> g) where + g :: T kk1 ty1 ~ T kk2 ty2 +we want to compute + (forall b:k a->b) [ Nth 0 g/k, Nth 1 g/a ] +Notice that we MUST substitute for 'k'; this happens in +liftCoSubstTyVarBndr. But what should we substitute? +We need to take b's kind 'k' and return a Kind, not a Coercion! + +Happily we can do this because we know that all kind coercions +((Nth 0 g) in this case) are Refl. So we need a special purpose + subst_kind: LiftCoSubst -> Kind -> Kind +that expects a Refl coercion (or something equivalent to Refl) +when it looks up a kind variable. + \begin{code} +-- ---------------------------------------------------- +-- See Note [Lifting coercions over types: liftCoSubst] +-- ---------------------------------------------------- + data LiftCoSubst = LCS InScopeSet LiftCoEnv type LiftCoEnv = VarEnv Coercion @@ -1109,14 +1150,44 @@ liftCoSubstTyVar :: LiftCoSubst -> TyVar -> Maybe Coercion liftCoSubstTyVar (LCS _ cenv) tv = lookupVarEnv cenv tv liftCoSubstTyVarBndr :: LiftCoSubst -> TyVar -> (LiftCoSubst, TyVar) -liftCoSubstTyVarBndr (LCS in_scope cenv) old_var +liftCoSubstTyVarBndr subst@(LCS in_scope cenv) old_var = (LCS (in_scope `extendInScopeSet` new_var) new_cenv, new_var) where new_cenv | no_change = delVarEnv cenv old_var | otherwise = extendVarEnv cenv old_var (Refl (TyVarTy new_var)) - no_change = new_var == old_var - new_var = uniqAway in_scope old_var + no_change = no_kind_change && (new_var == old_var) + + new_var1 = uniqAway in_scope old_var + + old_ki = tyVarKind old_var + no_kind_change = isEmptyVarSet (tyVarsOfType old_ki) + new_var | no_kind_change = new_var1 + | otherwise = setTyVarKind new_var1 (subst_kind subst old_ki) + +subst_kind :: LiftCoSubst -> Kind -> Kind +-- See Note [Substituting kinds in liftCoSubst] +subst_kind subst@(LCS _ cenv) kind + = go kind + where + go (LitTy n) = n `seq` LitTy n + go (TyVarTy kv) = subst_kv kv + go (TyConApp tc tys) = let args = map go tys + in args `seqList` TyConApp tc args + + go (FunTy arg res) = (FunTy $! (go arg)) $! (go res) + go (AppTy fun arg) = mkAppTy (go fun) $! (go arg) + go (ForAllTy tv ty) = case liftCoSubstTyVarBndr subst tv of + (subst', tv') -> + ForAllTy tv' $! (subst_kind subst' ty) + + subst_kv kv + | Just co <- lookupVarEnv cenv kv + , let co_kind = coercionKind co + = ASSERT2( pFst co_kind `eqKind` pSnd co_kind, ppr kv $$ ppr co ) + pFst co_kind + | otherwise + = TyVarTy kv \end{code} \begin{code} diff --git a/compiler/types/FamInstEnv.lhs b/compiler/types/FamInstEnv.lhs index 05a3ce5a898b..e2c4f0521291 100644 --- a/compiler/types/FamInstEnv.lhs +++ b/compiler/types/FamInstEnv.lhs @@ -250,7 +250,7 @@ mkImportedFamInst fam mb_tcs axiom %************************************************************************ Note [FamInstEnv] -~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~ A FamInstEnv maps a family name to the list of known instances for that family. The same FamInstEnv includes both 'data family' and 'type family' instances. 
@@ -268,6 +268,25 @@ Neverthless it is still useful to have data families in the FamInstEnv:
- In standalone deriving instance Eq (T [Int]) we need to find the representation type for T [Int]
+Note [Varying number of patterns for data family axioms]
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+For data families, the number of patterns may vary between instances.
+For example
+ data family T a b
+ data instance T Int a = T1 a | T2
+ data instance T Bool [a] = T3 a
+
+Then we get a data type for each instance, and an axiom:
+ data TInt a = T1 a | T2
+ data TBoolList a = T3 a
+
+ axiom ax7 :: T Int ~ TInt -- Eta-reduced
+ axiom ax8 a :: T Bool [a] ~ TBoolList a
+
+So we get two axioms for T, one with one pattern, one with two. The reason
+for this eta-reduction is described in TcInstDcls
+ Note [Eta reduction for data family axioms]
+
\begin{code}
type FamInstEnv = UniqFM FamilyInstEnv -- Maps a family to its instances
-- See Note [FamInstEnv]
@@ -275,14 +294,11 @@ type FamInstEnv = UniqFM FamilyInstEnv -- Maps a family to its instances
type FamInstEnvs = (FamInstEnv, FamInstEnv)
-- External package inst-env, Home-package inst-env
-data FamilyInstEnv
+newtype FamilyInstEnv
= FamIE [FamInst] -- The instances for a particular family, in any order
- Bool -- True <=> there is an instance of form T a b c
- -- If *not* then the common case of looking up
- -- (T a b c) can fail immediately
instance Outputable FamilyInstEnv where
- ppr (FamIE fs b) = ptext (sLit "FamIE") <+> ppr b <+> vcat (map ppr fs)
+ ppr (FamIE fs) = ptext (sLit "FamIE") <+> vcat (map ppr fs)
-- INVARIANTS:
-- * The fs_tvs are distinct in each FamInst
@@ -295,14 +311,14 @@ emptyFamInstEnv :: FamInstEnv
emptyFamInstEnv = emptyUFM
famInstEnvElts :: FamInstEnv -> [FamInst]
-famInstEnvElts fi = [elt | FamIE elts _ <- eltsUFM fi, elt <- elts]
+famInstEnvElts fi = [elt | FamIE elts <- eltsUFM fi, elt <- elts]
familyInstances :: (FamInstEnv, FamInstEnv) -> TyCon -> [FamInst]
familyInstances (pkg_fie, home_fie) fam
= get home_fie ++ get pkg_fie
where
get env = case lookupUFM env fam of
- Just (FamIE insts _) -> insts
+ Just (FamIE insts) -> insts
Nothing -> []
-- | Collects the names of the concrete types and type constructors that
@@ -321,19 +337,17 @@ extendFamInstEnvList inst_env fis = foldl extendFamInstEnv inst_env fis
extendFamInstEnv :: FamInstEnv -> FamInst -> FamInstEnv
extendFamInstEnv inst_env ins_item@(FamInst {fi_fam = cls_nm, fi_tcs = mb_tcs})
- = addToUFM_C add inst_env cls_nm (FamIE [ins_item] ins_tyvar)
+ = addToUFM_C add inst_env cls_nm (FamIE [ins_item])
where
- add (FamIE items tyvar) _ = FamIE (ins_item:items)
- (ins_tyvar || tyvar)
- ins_tyvar = not (any isJust mb_tcs)
+ add (FamIE items) _ = FamIE (ins_item:items)
deleteFromFamInstEnv :: FamInstEnv -> FamInst -> FamInstEnv
deleteFromFamInstEnv inst_env fam_inst@(FamInst {fi_fam = fam_nm})
= adjustUFM adjust inst_env fam_nm
where
adjust :: FamilyInstEnv -> FamilyInstEnv
- adjust (FamIE items tyvars)
- = FamIE (filterOut (identicalFamInst fam_inst) items) tyvars
+ adjust (FamIE items)
+ = FamIE (filterOut (identicalFamInst fam_inst) items)
identicalFamInst :: FamInst -> FamInst -> Bool
-- Same LHS, *and* the instance is defined in the same module
@@ -621,38 +635,15 @@ lookup_fam_inst_env' -- The worker, local to this module
:: MatchFun
-> OneSidedMatch
-> FamInstEnv
- -> TyCon -> [Type] -- What we are looking for
- -> [FamInstMatch] -- Successful matches
-lookup_fam_inst_env' match_fun one_sided ie fam tys
- | not (isOpenFamilyTyCon fam)
- = []
- | otherwise
- = ASSERT2( n_tys >= arity, ppr fam <+> ppr tys ) -- Family type applications must be saturated - lookup ie + -> TyCon -> [Type] -- What we are looking for + -> [FamInstMatch] +lookup_fam_inst_env' match_fun _one_sided ie fam tys + | isOpenFamilyTyCon fam + , Just (FamIE insts) <- lookupUFM ie fam + = find match_fun tys insts -- The common case + | otherwise = [] where - -- See Note [Over-saturated matches] - arity = tyConArity fam - n_tys = length tys - extra_tys = drop arity tys - (match_tys, add_extra_tys) - | arity < n_tys = (take arity tys, \res_tys -> res_tys ++ extra_tys) - | otherwise = (tys, \res_tys -> res_tys) - -- The second case is the common one, hence functional representation - - -------------- rough_tcs = roughMatchTcs match_tys - all_tvs = all isNothing rough_tcs && one_sided - - -------------- - lookup env = case lookupUFM env fam of - Nothing -> [] -- No instances for this class - Just (FamIE insts has_tv_insts) - -- Short cut for common case: - -- The thing we are looking up is of form (C a - -- b c), and the FamIE has no instances of - -- that form, so don't bother to search - | all_tvs && not has_tv_insts -> [] - | otherwise -> find insts -------------- find [] = [] @@ -663,17 +654,22 @@ lookup_fam_inst_env' match_fun one_sided ie fam tys = find rest -- Proper check - | Just subst <- match_fun item (mkVarSet tpl_tvs) tpl_tys match_tys + | Just subst <- match_fun item (mkVarSet tpl_tvs) tpl_tys match_tys1 = (FamInstMatch { fim_instance = item - , fim_tys = add_extra_tys $ substTyVars subst tpl_tvs }) + , fim_tys = substTyVars subst tpl_tvs `chkAppend` match_tys2 }) : find rest -- No match => try next | otherwise = find rest --- Precondition: the tycon is saturated (or over-saturated) + + -- Precondition: the tycon is saturated (or over-saturated) + + -- Deal with over-saturation + -- See Note [Over-saturated matches] + (match_tys1, match_tys2) = splitAtList mb_tcs match_tys -lookup_fam_inst_env -- The worker, local to this module +lookup_fam_inst_env -- The worker, local to this module :: MatchFun -> OneSidedMatch -> FamInstEnvs @@ -813,25 +809,24 @@ topNormaliseType :: FamInstEnvs -- Its a bit like Type.repType, but handles type families too topNormaliseType env ty - = go [] ty + = go initRecTc ty where - go :: [TyCon] -> Type -> Maybe (Coercion, Type) - go rec_nts ty | Just ty' <- coreView ty -- Expand synonyms - = go rec_nts ty' - - go rec_nts (TyConApp tc tys) - | isNewTyCon tc -- Expand newtypes - = if tc `elem` rec_nts -- See Note [Expanding newtypes] in Type.lhs - then Nothing - else let nt_co = mkUnbranchedAxInstCo (newTyConCo tc) tys - in add_co nt_co rec_nts' nt_rhs - - | isFamilyTyCon tc -- Expand family tycons - , (co, ty) <- normaliseTcApp env tc tys - -- Note that normaliseType fully normalises 'tys', - -- It has do to so to be sure that nested calls like - -- F (G Int) - -- are correctly top-normalised + go :: RecTcChecker -> Type -> Maybe (Coercion, Type) + go rec_nts ty + | Just ty' <- coreView ty -- Expand synonyms + = go rec_nts ty' + + | Just (rec_nts', nt_co, nt_rhs) <- topNormaliseNewTypeX rec_nts ty + = add_co nt_co rec_nts' nt_rhs + + go rec_nts (TyConApp tc tys) + | isFamilyTyCon tc -- Expand family tycons + , (co, ty) <- normaliseTcApp env tc tys + -- Note that normaliseType fully normalises 'tys', + -- wrt type functions but *not* newtypes + -- It has do to so to be sure that nested calls like + -- F (G Int) + -- are correctly top-normalised , not (isReflCo co) = add_co co rec_nts ty where @@ -851,7 +846,6 @@ topNormaliseType env ty 
normaliseTcApp :: FamInstEnvs -> TyCon -> [Type] -> (Coercion, Type) normaliseTcApp env tc tys | isFamilyTyCon tc - , tyConArity tc <= length tys -- Unsaturated data families are possible , Just (co, rhs) <- chooseAxiom env tc ntys = let -- A reduction is possible first_coi = mkTransCo tycon_coi co diff --git a/compiler/types/Kind.lhs b/compiler/types/Kind.lhs index 0082a333772f..ff0ad013abb1 100644 --- a/compiler/types/Kind.lhs +++ b/compiler/types/Kind.lhs @@ -40,10 +40,10 @@ module Kind ( isAnyKind, isAnyKindCon, okArrowArgKind, okArrowResultKind, - isSubOpenTypeKind, + isSubOpenTypeKind, isSubOpenTypeKindKey, isSubKind, isSubKindCon, tcIsSubKind, tcIsSubKindCon, - defaultKind, + defaultKind, defaultKind_maybe, -- ** Functions on variables kiVarsOfKind, kiVarsOfKinds @@ -60,6 +60,7 @@ import TyCon import VarSet import PrelNames import Outputable +import Maybes( orElse ) import Util \end{code} @@ -172,13 +173,8 @@ returnsConstraintKind _ = False -- arg -> res okArrowArgKindCon, okArrowResultKindCon :: TyCon -> Bool -okArrowArgKindCon kc - | isLiftedTypeKindCon kc = True - | isUnliftedTypeKindCon kc = True - | isConstraintKindCon kc = True - | otherwise = False - -okArrowResultKindCon = okArrowArgKindCon +okArrowArgKindCon = isSubOpenTypeKindCon +okArrowResultKindCon = isSubOpenTypeKindCon okArrowArgKind, okArrowResultKind :: Kind -> Bool okArrowArgKind (TyConApp kc []) = okArrowArgKindCon kc @@ -198,14 +194,17 @@ isSubOpenTypeKind :: Kind -> Bool isSubOpenTypeKind (TyConApp kc []) = isSubOpenTypeKindCon kc isSubOpenTypeKind _ = False -isSubOpenTypeKindCon kc - = isOpenTypeKindCon kc - || isUnliftedTypeKindCon kc - || isLiftedTypeKindCon kc - || isConstraintKindCon kc -- Needed for error (Num a) "blah" - -- and so that (Ord a -> Eq a) is well-kinded - -- and so that (# Eq a, Ord b #) is well-kinded - -- See Note [Kind Constraint and kind *] +isSubOpenTypeKindCon kc = isSubOpenTypeKindKey (tyConUnique kc) + +isSubOpenTypeKindKey :: Unique -> Bool +isSubOpenTypeKindKey uniq + = uniq == openTypeKindTyConKey + || uniq == unliftedTypeKindTyConKey + || uniq == liftedTypeKindTyConKey + || uniq == constraintKindTyConKey -- Needed for error (Num a) "blah" + -- and so that (Ord a -> Eq a) is well-kinded + -- and so that (# Eq a, Ord b #) is well-kinded + -- See Note [Kind Constraint and kind *] -- | Is this a kind (i.e. a type-of-types)? isKind :: Kind -> Bool @@ -271,7 +270,8 @@ tcIsSubKindCon kc1 kc2 | otherwise = isSubKindCon kc1 kc2 ------------------------- -defaultKind :: Kind -> Kind +defaultKind :: Kind -> Kind +defaultKind_maybe :: Kind -> Maybe Kind -- ^ Used when generalising: default OpenKind and ArgKind to *. -- See "Type#kind_subtyping" for more information on what that means @@ -289,9 +289,11 @@ defaultKind :: Kind -> Kind -- This defaulting is done in TcMType.zonkTcTyVarBndr. 
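-- For example (illustrative only; using the usual openTypeKind and
-- liftedTypeKind synonyms, and noting that only the outermost TyConApp
-- of the kind is inspected):
--     defaultKind_maybe openTypeKind    ==>  Just liftedTypeKind
--     defaultKind_maybe liftedTypeKind  ==>  Nothing
--     defaultKind       openTypeKind    ==>  liftedTypeKind
--     defaultKind       liftedTypeKind  ==>  liftedTypeKind   (unchanged)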
-- -- The test is really whether the kind is strictly above '*' -defaultKind (TyConApp kc _args) - | isOpenTypeKindCon kc = ASSERT( null _args ) liftedTypeKind -defaultKind k = k +defaultKind_maybe (TyConApp kc _args) + | isOpenTypeKindCon kc = ASSERT( null _args ) Just liftedTypeKind +defaultKind_maybe _ = Nothing + +defaultKind k = defaultKind_maybe k `orElse` k -- Returns the free kind variables in a kind kiVarsOfKind :: Kind -> VarSet diff --git a/compiler/types/TyCon.lhs b/compiler/types/TyCon.lhs index 1ea0ebf68b92..fb078ec979a2 100644 --- a/compiler/types/TyCon.lhs +++ b/compiler/types/TyCon.lhs @@ -83,7 +83,10 @@ module TyCon( -- * Primitive representations of Types PrimRep(..), PrimElemRep(..), tyConPrimRep, - primRepSizeW, primElemRepSizeB + primRepSizeW, primElemRepSizeB, + + -- * Recursion breaking + RecTcChecker, initRecTc, checkRecTc ) where #include "HsVersions.h" @@ -97,6 +100,7 @@ import BasicTypes import DynFlags import ForeignCall import Name +import NameSet import CoAxiom import PrelNames import Maybes @@ -1569,3 +1573,55 @@ instance Data.Data TyCon where dataTypeOf _ = mkNoRepType "TyCon" \end{code} + +%************************************************************************ +%* * + Walking over recursive TyCons +%* * +%************************************************************************ + +Note [Expanding newtypes and products] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +When expanding a type to expose a data-type constructor, we need to be +careful about newtypes, lest we fall into an infinite loop. Here are +the key examples: + + newtype Id x = MkId x + newtype Fix f = MkFix (f (Fix f)) + newtype T = MkT (T -> T) + + Type Expansion + -------------------------- + T T -> T + Fix Maybe Maybe (Fix Maybe) + Id (Id Int) Int + Fix Id NO NO NO + +Notice that we can expand T, even though it's recursive. +And we can expand Id (Id Int), even though the Id shows up +twice at the outer level. + +So, when expanding, we keep track of when we've seen a recursive +newtype at outermost level; and bale out if we see it again. + +We sometimes want to do the same for product types, so that the +strictness analyser doesn't unbox infinitely deeply. + +The function that manages this is checkRecTc. 
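For illustration only, here is a self-contained toy model of that check,
using a list of Strings in place of GHC's NameSet and TyCon (the real
RecTcChecker is defined just below):

\begin{code}
-- Toy model of the recursion check described above; illustrative only.
-- 'checkSeen' plays the role of checkRecTc: a non-recursive constructor
-- is always allowed, a recursive one is allowed at most once.
newtype SeenRec = SeenRec [String]

emptySeen :: SeenRec
emptySeen = SeenRec []

checkSeen :: Bool           -- is the constructor recursive?
          -> String         -- its name
          -> SeenRec
          -> Maybe SeenRec  -- Nothing => stop, we are about to loop
checkSeen isRecursive name (SeenRec seen)
  | not isRecursive  = Just (SeenRec seen)
  | name `elem` seen = Nothing
  | otherwise        = Just (SeenRec (name : seen))
\end{code}

A caller threads the checker through its expansion loop, expanding a
newtype only when the check succeeds; repType in Type.lhs (further down
in this patch) does exactly that with the real RecTcChecker.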
+ +\begin{code} +newtype RecTcChecker = RC NameSet + +initRecTc :: RecTcChecker +initRecTc = RC emptyNameSet + +checkRecTc :: RecTcChecker -> TyCon -> Maybe RecTcChecker +-- Nothing => Recursion detected +-- Just rec_tcs => Keep going +checkRecTc (RC rec_nts) tc + | not (isRecursiveTyCon tc) = Just (RC rec_nts) + | tc_name `elemNameSet` rec_nts = Nothing + | otherwise = Just (RC (addOneToNameSet rec_nts tc_name)) + where + tc_name = tyConName tc +\end{code} diff --git a/compiler/types/Type.lhs b/compiler/types/Type.lhs index 1b62d32cbc9d..993507062d07 100644 --- a/compiler/types/Type.lhs +++ b/compiler/types/Type.lhs @@ -22,7 +22,7 @@ module Type ( -- ** Constructing and deconstructing types mkTyVarTy, mkTyVarTys, getTyVar, getTyVar_maybe, - mkAppTy, mkAppTys, mkNakedAppTys, splitAppTy, splitAppTys, + mkAppTy, mkAppTys, splitAppTy, splitAppTys, splitAppTy_maybe, repSplitAppTy_maybe, mkFunTy, mkFunTys, splitFunTy, splitFunTy_maybe, @@ -159,14 +159,13 @@ import Class import TyCon import TysPrim import {-# SOURCE #-} TysWiredIn ( eqTyCon, typeNatKind, typeSymbolKind ) -import PrelNames ( eqTyConKey, ipClassNameKey, +import PrelNames ( eqTyConKey, ipClassNameKey, openTypeKindTyConKey, constraintKindTyConKey, liftedTypeKindTyConKey ) import CoAxiom -- others import Unique ( Unique, hasKey ) import BasicTypes ( Arity, RepArity ) -import NameSet import StaticFlags import Util import Outputable @@ -350,11 +349,6 @@ mkAppTys ty1 [] = ty1 mkAppTys (TyConApp tc tys1) tys2 = mkTyConApp tc (tys1 ++ tys2) mkAppTys ty1 tys2 = foldl AppTy ty1 tys2 -mkNakedAppTys :: Type -> [Type] -> Type -mkNakedAppTys ty1 [] = ty1 -mkNakedAppTys (TyConApp tc tys1) tys2 = mkNakedTyConApp tc (tys1 ++ tys2) -mkNakedAppTys ty1 tys2 = foldl AppTy ty1 tys2 - ------------- splitAppTy_maybe :: Type -> Maybe (Type, Type) -- ^ Attempt to take a type application apart, whether it is a @@ -595,31 +589,6 @@ The reason is that we then get better (shorter) type signatures in interfaces. Notably this plays a role in tcTySigs in TcBinds.lhs. -Note [Expanding newtypes] -~~~~~~~~~~~~~~~~~~~~~~~~~ -When expanding a type to expose a data-type constructor, we need to be -careful about newtypes, lest we fall into an infinite loop. Here are -the key examples: - - newtype Id x = MkId x - newtype Fix f = MkFix (f (Fix f)) - newtype T = MkT (T -> T) - - Type Expansion - -------------------------- - T T -> T - Fix Maybe Maybe (Fix Maybe) - Id (Id Int) Int - Fix Id NO NO NO - -Notice that we can expand T, even though it's recursive. -And we can expand Id (Id Int), even though the Id shows up -twice at the outer level. - -So, when expanding, we keep track of when we've seen a recursive -newtype at outermost level; and bale out if we see it again. - - Representation types ~~~~~~~~~~~~~~~~~~~~ @@ -654,9 +623,9 @@ flattenRepType (UnaryRep ty) = [ty] -- It's useful in the back end of the compiler. 
repType :: Type -> RepType repType ty - = go emptyNameSet ty + = go initRecTc ty where - go :: NameSet -> Type -> RepType + go :: RecTcChecker -> Type -> RepType go rec_nts ty -- Expand predicates and synonyms | Just ty' <- coreView ty = go rec_nts ty' @@ -667,10 +636,7 @@ repType ty go rec_nts (TyConApp tc tys) -- Expand newtypes | isNewTyCon tc , tys `lengthAtLeast` tyConArity tc - , let tc_name = tyConName tc - rec_nts' | isRecursiveTyCon tc = addOneToNameSet rec_nts tc_name - | otherwise = rec_nts - , not (tc_name `elemNameSet` rec_nts) -- See Note [Expanding newtypes] + , Just rec_nts' <- checkRecTc rec_nts tc -- See Note [Expanding newtypes] in TyCon = go rec_nts' (newTyConInstRhs tc tys) | isUnboxedTupleTyCon tc @@ -1250,7 +1216,7 @@ cmpTypeX env t1 t2 | Just t1' <- coreView t1 = cmpTypeX env t1' t2 -- So the RHS has a data type cmpTypeX env (TyVarTy tv1) (TyVarTy tv2) = rnOccL env tv1 `compare` rnOccR env tv2 -cmpTypeX env (ForAllTy tv1 t1) (ForAllTy tv2 t2) = cmpTypeX env (tyVarKind tv1) (tyVarKind tv1) +cmpTypeX env (ForAllTy tv1 t1) (ForAllTy tv2 t2) = cmpTypeX env (tyVarKind tv1) (tyVarKind tv2) `thenCmp` cmpTypeX (rnBndr2 env tv1 tv2) t1 t2 cmpTypeX env (AppTy s1 t1) (AppTy s2 t2) = cmpTypeX env s1 s2 `thenCmp` cmpTypeX env t1 t2 cmpTypeX env (FunTy s1 t1) (FunTy s2 t2) = cmpTypeX env s1 s2 `thenCmp` cmpTypeX env t1 t2 @@ -1291,7 +1257,13 @@ cmpTypesX _ _ [] = GT cmpTc :: TyCon -> TyCon -> Ordering -- Here we treat * and Constraint as equal -- See Note [Kind Constraint and kind *] in Kinds.lhs -cmpTc tc1 tc2 = nu1 `compare` nu2 +-- +-- Also we treat OpenTypeKind as equal to either * or # +-- See Note [Comparison with OpenTypeKind] +cmpTc tc1 tc2 + | u1 == openTypeKindTyConKey, isSubOpenTypeKindKey u2 = EQ + | u2 == openTypeKindTyConKey, isSubOpenTypeKindKey u1 = EQ + | otherwise = nu1 `compare` nu2 where u1 = tyConUnique tc1 nu1 = if u1==constraintKindTyConKey then liftedTypeKindTyConKey else u1 @@ -1299,6 +1271,18 @@ cmpTc tc1 tc2 = nu1 `compare` nu2 nu2 = if u2==constraintKindTyConKey then liftedTypeKindTyConKey else u2 \end{code} +Note [Comparison with OpenTypeKind] +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +In PrimOpWrappers we have things like + PrimOpWrappers.mkWeak# = /\ a b c. Prim.mkWeak# a b c +where + Prim.mkWeak# :: forall (a:Open) b c. a -> b -> c + -> State# RealWorld -> (# State# RealWorld, Weak# b #) +Now, eta reduction will turn the definition into + PrimOpWrappers.mkWeak# = Prim.mkWeak# +which is kind-of OK, but now the types aren't really equal. So HACK HACK +we pretend (in Core) that Open is equal to * or #. I hate this. + Note [cmpTypeX] ~~~~~~~~~~~~~~~ diff --git a/compiler/types/TypeRep.lhs b/compiler/types/TypeRep.lhs index f7fdd595aa5e..ef79974605e9 100644 --- a/compiler/types/TypeRep.lhs +++ b/compiler/types/TypeRep.lhs @@ -33,7 +33,7 @@ module TypeRep ( PredType, ThetaType, -- Synonyms -- Functions over types - mkNakedTyConApp, mkTyConTy, mkTyVarTy, mkTyVarTys, + mkTyConTy, mkTyVarTy, mkTyVarTys, isLiftedTypeKind, isSuperKind, isTypeVar, isKindVar, -- Pretty-printing @@ -140,7 +140,7 @@ data Type Var -- Type or kind variable Type -- ^ A polymorphic type - | LitTy TyLit -- ^ Type literals are simillar to type constructors. + | LitTy TyLit -- ^ Type literals are similar to type constructors. 
deriving (Data.Data, Data.Typeable) @@ -280,14 +280,6 @@ mkTyVarTy = TyVarTy mkTyVarTys :: [TyVar] -> [Type] mkTyVarTys = map mkTyVarTy -- a common use of mkTyVarTy -mkNakedTyConApp :: TyCon -> [Type] -> Type --- Builds a TyConApp --- * without being strict in TyCon, --- * the TyCon should never be a saturated FunTyCon --- Type.mkTyConApp is the usual one -mkNakedTyConApp tc tys - = TyConApp (ASSERT( not (isFunTyCon tc && length tys == 2) ) tc) tys - -- | Create the plain type constructor type which has been applied to no type arguments at all. mkTyConTy :: TyCon -> Type mkTyConTy tycon = TyConApp tycon [] diff --git a/compiler/utils/Util.lhs b/compiler/utils/Util.lhs index 90a2077c717e..e2fd0aa09308 100644 --- a/compiler/utils/Util.lhs +++ b/compiler/utils/Util.lhs @@ -18,7 +18,7 @@ module Util ( unzipWith, - mapFst, mapSnd, + mapFst, mapSnd, chkAppend, mapAndUnzip, mapAndUnzip3, mapAccumL2, nOfThem, filterOut, partitionWith, splitEithers, @@ -259,6 +259,13 @@ splitEithers (e : es) = case e of Left x -> (x:xs, ys) Right y -> (xs, y:ys) where (xs,ys) = splitEithers es + +chkAppend :: [a] -> [a] -> [a] +-- Checks for the second arguemnt being empty +-- Used in situations where that situation is common +chkAppend xs ys + | null ys = xs + | otherwise = xs ++ ys \end{code} A paranoid @zip@ (and some @zipWith@ friends) that checks the lists diff --git a/compiler/vectorise/Vectorise/Generic/PADict.hs b/compiler/vectorise/Vectorise/Generic/PADict.hs index f70e796daa93..7e70f2dd1104 100644 --- a/compiler/vectorise/Vectorise/Generic/PADict.hs +++ b/compiler/vectorise/Vectorise/Generic/PADict.hs @@ -68,37 +68,35 @@ buildPADict vect_tc prepr_ax pdata_tc pdatas_tc repr ; pr_cls <- builtin prClass ; return $ mkClassPred pr_cls [r] } - ; super_tys <- sequence [mk_super_ty | not (null tvs)] + ; super_tys <- sequence [mk_super_ty | not (null tvs)] ; super_args <- mapM (newLocalVar (fsLit "pr")) super_tys - ; let all_args = super_args ++ args + ; let val_args = super_args ++ args + all_args = tvs ++ val_args -- ...it is constant otherwise ; super_consts <- sequence [prDictOfPReprInstTyCon inst_ty prepr_ax [] | null tvs] -- Get ids for each of the methods in the dictionary, including superclass ; paMethodBuilders <- buildPAScAndMethods - ; method_ids <- mapM (method all_args dfun_name) paMethodBuilders + ; method_ids <- mapM (method val_args dfun_name) paMethodBuilders -- Expression to build the dictionary. ; pa_dc <- builtin paDataCon - ; let dict = mkLams (tvs ++ all_args) - $ mkConApp pa_dc - $ Type inst_ty - : map Var super_args ++ super_consts -- the superclass dictionary is either lambda-bound or constant - ++ map (method_call all_args) method_ids + ; let dict = mkLams all_args (mkConApp pa_dc con_args) + con_args = Type inst_ty + : map Var super_args -- the superclass dictionary is either + ++ super_consts -- lambda-bound or constant + ++ map (method_call val_args) method_ids -- Build the type of the dictionary function. ; pa_cls <- builtin paClass ; let dfun_ty = mkForAllTys tvs - $ mkFunTys (map varType all_args) + $ mkFunTys (map varType val_args) (mkClassPred pa_cls [inst_ty]) -- Set the unfolding for the inliner. ; raw_dfun <- newExportedVar dfun_name dfun_ty - ; let dfun_unf = mkDFunUnfolding dfun_ty $ - map (const $ DFunLamArg 0) super_args - ++ map DFunPolyArg super_consts - ++ map (DFunPolyArg . 
Var) method_ids + ; let dfun_unf = mkDFunUnfolding all_args pa_dc con_args dfun = raw_dfun `setIdUnfolding` dfun_unf `setInlinePragma` dfunInlinePragma diff --git a/configure.ac b/configure.ac index 66f60ffe85e6..7bbeca66055b 100644 --- a/configure.ac +++ b/configure.ac @@ -553,8 +553,6 @@ dnl ** look to see if we have a C compiler using an llvm back end. dnl FP_CC_LLVM_BACKEND -FP_PROG_LD_HashSize31 -FP_PROG_LD_ReduceMemoryOverheads FP_PROG_LD_IS_GNU FP_PROG_LD_BUILD_ID FP_PROG_LD_NO_COMPACT_UNWIND @@ -901,8 +899,7 @@ if grep ' ' compiler/ghc.cabal.in 2>&1 >/dev/null; then AC_MSG_ERROR([compiler/ghc.cabal.in contains tab characters; please remove them]) fi -AC_CONFIG_FILES([mk/config.mk mk/install.mk mk/project.mk compiler/ghc.cabal ghc/ghc-bin.cabal utils/runghc/runghc.cabal ghc.spec settings docs/users_guide/ug-book.xml docs/users_guide/ug-ent.xml docs/index.html libraries/prologue.txt distrib/configure.ac]) -AC_CONFIG_COMMANDS([mk/stamp-h],[echo timestamp > mk/stamp-h]) +AC_CONFIG_FILES([mk/config.mk mk/install.mk mk/project.mk compiler/ghc.cabal ghc/ghc-bin.cabal utils/runghc/runghc.cabal settings docs/users_guide/ug-book.xml docs/users_guide/ug-ent.xml docs/index.html libraries/prologue.txt distrib/configure.ac]) AC_OUTPUT # We got caught by diff --git a/distrib/configure.ac.in b/distrib/configure.ac.in index 6b20f849d483..4a6944fe17c9 100644 --- a/distrib/configure.ac.in +++ b/distrib/configure.ac.in @@ -72,8 +72,6 @@ AC_SUBST([LdCmd]) FP_GCC_VERSION AC_PROG_CPP -FP_PROG_LD_HashSize31 -FP_PROG_LD_ReduceMemoryOverheads FP_PROG_LD_IS_GNU FP_PROG_LD_BUILD_ID FP_PROG_LD_NO_COMPACT_UNWIND diff --git a/docs/storage-mgt/ldv.tex b/docs/storage-mgt/ldv.tex index 936407c7012d..79f0f2328277 100644 --- a/docs/storage-mgt/ldv.tex +++ b/docs/storage-mgt/ldv.tex @@ -388,7 +388,7 @@ \section{Destruction of Closures} \begin{enumerate} \item A closure is overwritten with a blackhole: - @UPD_BH_UPDATABLE()@ and @UPD_BH_SINGLE_ENTRY()@ in @includes/StgMacros.h@, + @UPD_BH_UPDATABLE()@ in @includes/StgMacros.h@, @threadLazyBlackHole()@ and @threadSqueezeStack()@ in @GC.c@, the entry code for @BLACKHOLE@ closures in @StgMiscClosures.hc@ (a @BLACKHOLE@ closure is changed into a @BLACKHOLE_BQ@ closure). @@ -499,7 +499,7 @@ \subsection{Linear scan of the from-space during garbage collections} \begin{enumerate} \item @threadLazyBlackHole()@ and @threadSqueezeStack()@ in @GC.c@ (for lazy blackholing), -\item @UPD_BH_UPDATABLE()@ and @UPD_BH_SINGLE_ENTRY()@ in +\item @UPD_BH_UPDATABLE()@ in @includes/StgMacros.h@ (for eager blackholing, which isn't the default), \item @updateWithIndirection()@ and @updateWithPermIndirection()@ @@ -639,7 +639,7 @@ \section{Files} with LDVU profiling. \item[ClosureMacros.h] changes macro @SET_PROF_HDR()@. \item[Stg.h] includes th header file @StgLdvProf.h@. -\item[StgMacros.h] changes macros @UPD_BH_UPDATABLE()@ and @UPD_BH_SINGLE_ENTRY()@. +\item[StgMacros.h] changes macros @UPD_BH_UPDATABLE()@. \end{description} @\rts@ directory: diff --git a/docs/users_guide/7.8.1-notes.xml b/docs/users_guide/7.8.1-notes.xml index 20785d09fa63..33e358e3f9ed 100644 --- a/docs/users_guide/7.8.1-notes.xml +++ b/docs/users_guide/7.8.1-notes.xml @@ -3,9 +3,9 @@ Release notes for version 7.8.1 - The significant changes to the various parts of the compiler are - listed in the following sections. There have also been numerous bug - fixes and performance improvements over the 7.6 branch. + The significant changes to the various parts of the compiler are listed + in the following sections. 
There have also been numerous bug fixes and + performance improvements over the 7.6 branch. @@ -16,18 +16,485 @@ - - - TODO: Format these nicely and expand: - - type holes - - rebindable list syntax - - major changes to the type inference engine - - type level naturals - - overlapping type families - - new codegen - - + + + GHC now supports "type holes" with the + TypeHoles extension. When enabled, the + unbound literal _ may be used during + development in place of a regular identifier, and GHC will + respond with the type necessary to "fill in the hole." + + TODO FIXME: reference. + + + + + + GHC now supports overloading list literals using the new + OverloadedLists extension. + + TODO FIXME: reference. + + + + + + GHC now supports overlapping type family instances when + TypeFamilies are enabled. + + TODO FIXME: reference. + + + + + + There has been significant overhaul of the type inference engine and + constraint solver. + + TODO FIXME: reference. + + + + + + By default, GHC will now unbox all "small" strict fields in a + data type. A "small" data type is one whose size is equivalent + to or smaller than the native word size of the machine. This + means you no longer have to specify UNPACK + pragmas for e.g. Int fields. This also + applies to floating-point values. + + + + + + GHC now has a brand-new I/O manager that scales significantly + better for larger workloads compared to the previous one. It + should scale linearly up to approximately 32 cores. + + + + + + The LLVM backend now supports 128bit SIMD operations. This is + now exploited in both the vector and + dph packages, exposing a high level + interface. + + TODO FIXME: reference. + + + This is only available with the LLVM backend. + + + + + + The new code generator, after significant work by many + individuals over the past several years, is now enabled by + default. This is a complete rewrite of the STG to Cmm + transformation. In general, your programs may get slightly + faster. + + + + The old code generator has been removed completely. + + + + + + TODO: mention dynamic changes + + + + + + TODO: mention new Typeable and + AutoDeriveTypeable + + + + + + + Full details + + Language + + + + TODO FIXME + + + + + + + Compiler + + + + GHC now supports a --show-options flag, + which will dump all of the flags it supports to standard out. + + + + + It's now possible to switch the system linker on Linux + (between GNU gold and GNU ld) at runtime without problem. + + + + + + + GHCi + + + + TODO FIXME + + + + + + + Template Haskell + + + + TODO FIXME + + + + + + + Runtime system + + + + The performance of StablePtrs and + StableNames has been improved. + + + + + + + Build system + + + + GHC >= 7.4 is now required for bootstrapping. + + + + + + + + Libraries + + + There have been some changes that have effected multiple + libraries: + + + + + + TODO FIXME + + + + + + The following libraries have been removed from the GHC tree: + + + + + TODO FIXME + + + + + The following libraries have been added to the GHC tree: + + + + + TODO FIXME + + + + array + + + + Version number XXXX (was XXXX) + + + + + + + base + + + + Version number 4.7.0.0 (was 4.6.0.1) + + + + + The Control.Category module now has the + PolyKinds extension enabled, meaning + that instances of Category no longer + need be of kind * -> * -> * + + + + + There are now Foldable and Traversable + instances for Either a and (,) a + + + + + + + bin-package-db + + + + This is an internal package, and should not be used. 
+ + + + + + + binary + + + + Version number XXXX (was XXXX) + + + + + + + bytestring + + + + Version number XXXX (was XXXX) + + + + + + + Cabal + + + + Version number XXXX (was XXXX) + + + + + + + containers + + + + Version number XXXX (was XXXX) + + + + + + + deepseq + + + + Version number XXXX (was XXXX) + + + + + + + directory + + + + Version number XXXX (was XXXX) + + + + + + + filepath + + + + Version number XXXX (was XXXX) + + + + + + + ghc-prim + + + + Version number XXXX (was XXXX) + + + + + + + haskell98 + + + + Version number XXXX (was XXXX) + + + + + + + haskell2010 + + + + Version number XXXX (was XXXX) + + + + + + + hoopl + + + + Version number XXXX (was XXXX) + + + + + + + hpc + + + + Version number XXXX (was XXXX) + + + + + + + integer-gmp + + + + Version number XXXX (was XXXX) + + + + + + + old-locale + + + + Version number XXXX (was XXXX) + + + + + + + old-time + + + + Version number XXXX (was XXXX) + + + + + + + process + + + + Version number XXXX (was XXXX) + + + + + + + template-haskell + + + + Version number XXXX (was XXXX) + + + + + + + time + + + + Version number XXXX (was XXXX) + + + + + + + unix + + + + Version number XXXX (was XXXX) + + + + + + + Win32 + + + + Version number XXXX (was XXXX) + + + + - diff --git a/docs/users_guide/flags.xml b/docs/users_guide/flags.xml index 5366360f37c7..e034cd731192 100644 --- a/docs/users_guide/flags.xml +++ b/docs/users_guide/flags.xml @@ -59,6 +59,12 @@ mode - + + + display the supported command line options + mode + - + display information about the compiler diff --git a/docs/users_guide/ghci.xml b/docs/users_guide/ghci.xml index 9e8abbb38a9f..66580acbc557 100644 --- a/docs/users_guide/ghci.xml +++ b/docs/users_guide/ghci.xml @@ -2707,6 +2707,16 @@ bar + + + :set prompt2 prompt + + + Sets the string to be used as the continuation prompt + (used when using the :{ command) in GHCi. + + + :set stop diff --git a/docs/users_guide/using.xml b/docs/users_guide/using.xml index 9c6fbf714ad5..1453e7308755 100644 --- a/docs/users_guide/using.xml +++ b/docs/users_guide/using.xml @@ -470,6 +470,18 @@ module X where + + + + ghc --show-options + + + + + Print the supported command line options. This flag can be used for autocompletion in a shell. + + + diff --git a/ghc.mk b/ghc.mk index ad0a381a39c3..e337ddd35f98 100644 --- a/ghc.mk +++ b/ghc.mk @@ -76,6 +76,23 @@ default : all + +################################################## +# Check that we have a new enough 'make' + +HAVE_EVAL := NO +$(eval HAVE_EVAL := YES) + +ifeq "$(HAVE_EVAL)" "NO" +$(error Your make does not support eval. You need GNU make >= 3.81) +endif + +ifeq "$(abspath /)" "" +$(error Your make does not support abspath. 
You need GNU make >= 3.81) +endif +################################################## + + # Catch make if it runs away into an infinite loop ifeq "$(MAKE_RESTARTS)" "" else ifeq "$(MAKE_RESTARTS)" "1" @@ -511,7 +528,7 @@ utils/runghc/dist-install/package-data.mk: compiler/stage2/package-data.mk utils/mkUserGuidePart/dist/package-data.mk: compiler/stage2/package-data.mk # add the final package.conf dependency: ghc-prim depends on RTS -libraries/ghc-prim/dist-install/package-data.mk : rts/package.conf.inplace +libraries/ghc-prim/dist-install/package-data.mk : rts/dist/package.conf.inplace endif # -------------------------------- @@ -878,7 +895,7 @@ INSTALL_DISTDIR_compiler = stage2 # Now we can do the installation install_packages: install_libexecs -install_packages: rts/package.conf.install +install_packages: rts/dist/package.conf.install $(call INSTALL_DIR,"$(DESTDIR)$(topdir)") $(call removeTrees,"$(INSTALLED_PACKAGE_CONF)") $(call INSTALL_DIR,"$(INSTALLED_PACKAGE_CONF)") @@ -895,7 +912,7 @@ install_packages: rts/package.conf.install '$(prefix)' \ '$(ghclibdir)' \ '$(docdir)/html/libraries')) - "$(INSTALLED_GHC_PKG_REAL)" --force --global-package-db "$(INSTALLED_PACKAGE_CONF)" update rts/package.conf.install + "$(INSTALLED_GHC_PKG_REAL)" --force --global-package-db "$(INSTALLED_PACKAGE_CONF)" update rts/dist/package.conf.install $(foreach p, $(INSTALL_PACKAGES), \ $(call make-command, \ "$(ghc-cabal_INPLACE)" register \ @@ -989,7 +1006,7 @@ unix-binary-dist-prep: $(call removeFiles,$(BIN_DIST_PREP_TAR)) # h means "follow symlinks", e.g. if aclocal.m4 is a symlink to a source # tree then we want to include the real file, not a symlink to it - cd bindistprep && "$(TAR_CMD)" hcf - -T ../$(BIN_DIST_LIST) | bzip2 -c > ../$(BIN_DIST_PREP_TAR_BZ2) + cd bindistprep && "$(TAR_CMD)" hcf - -T ../bindist-list | bzip2 -c > ../$(BIN_DIST_PREP_TAR_BZ2) windows-binary-dist-prep: $(call removeTrees,bindistprep/) @@ -1067,7 +1084,7 @@ SRC_DIST_GHC_DIRS = mk rules docs distrib bindisttest libffi includes \ SRC_DIST_GHC_FILES += \ configure.ac config.guess config.sub configure \ aclocal.m4 README ANNOUNCE HACKING LICENSE Makefile install-sh \ - ghc.spec.in ghc.spec settings.in VERSION \ + settings.in VERSION \ boot packages ghc.mk VERSION : @@ -1203,26 +1220,65 @@ clean_bindistprep: $(call removeTrees,bindistprep/) distclean : clean - $(call removeFiles,config.cache config.status config.log mk/config.h mk/stamp-h) - $(call removeFiles,mk/config.mk mk/are-validating.mk mk/project.mk) - $(call removeFiles,mk/config.mk.old mk/project.mk.old) - $(call removeFiles,settings docs/users_guide/ug-book.xml) - $(call removeFiles,compiler/ghc.cabal compiler/ghc.cabal.old) +# Clean the files that ./validate creates. + $(call removeFiles,mk/are-validating.mk) + +# Clean the files that we ask ./configure to create. + $(call removeFiles,mk/config.mk) + $(call removeFiles,mk/install.mk) + $(call removeFiles,mk/project.mk) + $(call removeFiles,compiler/ghc.cabal) $(call removeFiles,ghc/ghc-bin.cabal) + $(call removeFiles,utils/runghc/runghc.cabal) + $(call removeFiles,settings) + $(call removeFiles,docs/users_guide/ug-book.xml) + $(call removeFiles,docs/users_guide/ug-ent.xml) + $(call removeFiles,docs/index.html) + $(call removeFiles,libraries/prologue.txt) + $(call removeFiles,distrib/configure.ac) + +# ./configure also makes these. + $(call removeFiles,mk/config.h) + +# Internal files generated by ./configure for itself. 
+ $(call removeFiles,config.cache config.status config.log) + +# ./configure build ghc-pwd in utils/ghc-pwd/dist-boot, so clean it up. + $(call removeTrees,utils/ghc-pwd/dist-boot) + +# The root Makefile makes .old versions of some files that configure +# generates, so we clean those too. + $(call removeFiles,mk/config.mk.old) + $(call removeFiles,mk/project.mk.old) + $(call removeFiles,compiler/ghc.cabal.old) + +# Clean the *Config.h files generated by library configure scripts $(call removeFiles,libraries/base/include/HsBaseConfig.h) + $(call removeFiles,libraries/base/include/EventConfig.h) $(call removeFiles,libraries/directory/include/HsDirectoryConfig.h) $(call removeFiles,libraries/process/include/HsProcessConfig.h) $(call removeFiles,libraries/unix/include/HsUnixConfig.h) + $(call removeFiles,libraries/time/include/HsTimeConfig.h) + $(call removeFiles,libraries/time/include/HsTimeConfig.h.in) $(call removeFiles,libraries/old-time/include/HsTimeConfig.h) - $(call removeTrees,utils/ghc-pwd/dist-boot) + +# The library configure scripts also like creating autom4te.cache +# directories, so clean them all up. + $(call removeTrees,$(patsubst %, libraries/%/autom4te.cache, $(PACKAGES_STAGE1) $(PACKAGES_STAGE2))) + +# We make these when making or testing bindists + $(call removeFiles,bindist-list) + $(call removeTrees,bindisttest/a) + +# Not sure why this is being cleaned here. $(call removeTrees,includes/dist-derivedconstants) + +# Finally, clean the inplace tree. $(call removeTrees,inplace) - $(call removeTrees,$(patsubst %, libraries/%/autom4te.cache, $(PACKAGES_STAGE1) $(PACKAGES_STAGE2))) maintainer-clean : distclean $(call removeFiles,configure mk/config.h.in) $(call removeTrees,autom4te.cache $(wildcard libraries/*/autom4te.cache)) - $(call removeFiles,ghc.spec) $(call removeFiles,$(patsubst %, libraries/%/GNUmakefile, \ $(PACKAGES_STAGE1) $(PACKAGES_STAGE2))) $(call removeFiles,$(patsubst %, libraries/%/ghc.mk, $(PACKAGES_STAGE1) $(PACKAGES_STAGE2))) diff --git a/ghc.spec.in b/ghc.spec.in deleted file mode 100644 index 2a70043eea2f..000000000000 --- a/ghc.spec.in +++ /dev/null @@ -1,187 +0,0 @@ -# WARNING: ghc.spec is automatically generated from ghc.spec.in by -# ./configure. Make sure you are editing ghc.spec.in, not ghc.spec. -# -# RPM spec file for GHC -*-rpm-spec-*- -# -# Copyright [1998..2007] The GHC Team -# -# Thanks to Zoltan Vorosbaranyi for suggestions in -# earlier versions and Pixel for coding tips. -# -# This file is subject to the same free software license as GHC. - -%define name ghc -%define version @ProjectVersion@ -%define release @release@ - -Name: %{name} -Version: %{version} -Release: %{release} -License: BSD-like -Group: Development/Languages/Haskell -URL: http://haskell.org/ghc/ -Source0: http://haskell.org/ghc/dist/%{version}/ghc-%{version}-src.tar.bz2 -Source1: http://haskell.org/ghc/dist/%{version}/ghc-%{version}-src-extralibs.tar.bz2 -Packager: Sven Panne -BuildRoot: %{_tmppath}/%{name}-%{version}-build -PreReq: update-alternatives -Requires: gmp, readline -BuildRequires: update-alternatives, alex >= 2.0, happy >= 1.15, ghc >= 5, haddock, docbook-dtd, docbook-xsl-stylesheets, libxslt, libxml2, fop, xmltex, dvips, gmp, readline-devel, mesaglut-devel -Provides: haskell -Summary: The Glasgow Haskell Compiler - -%description -Haskell is the standard lazy purely functional programming language. -The current language version is Haskell 98, agreed in December 1998, -with a revised version published in January 2003. 
- -GHC is a state-of-the-art programming suite for Haskell. Included is -an optimising compiler generating good code for a variety of -platforms, together with an interactive system for convenient, quick -development. The distribution includes space and time profiling -facilities, a large collection of libraries, and support for various -language extensions, including concurrency, exceptions, and foreign -language interfaces (C, C++, whatever). - -A wide variety of Haskell related resources (tutorials, libraries, -specifications, documentation, compilers, interpreters, references, -contact information, links to research groups) are available from the -Haskell home page at http://haskell.org/. - -Authors: --------- - Krasimir Angelov - Manuel Chakravarty - Koen Claessen - Robert Ennals - Sigbjorn Finne - Gabrielle Keller - Marcin Kowalczyk - Jeff Lewis - Ian Lynagh - Simon Marlow - Sven Panne - Ross Paterson - Simon Peyton Jones - Don Stewart - Volker Stolz - Wolfgang Thaller - Andrew Tolmach - Keith Wansbrough - Michael Weber - plus a dozen helping hands... - -%package prof -Requires: ghc = %{version}-%{release} -Summary: Profiling libraries for GHC -Group: Development/Libraries - -%description prof -Profiling libraries for Glorious Glasgow Haskell Compilation System -(GHC). They should be installed when GHC's profiling subsystem is -needed. - -%prep -%setup -b1 - -%build -test -f configure || perl boot -./configure --prefix=%{_prefix} --mandir=%{_mandir} - -# Don't install these tools, we'll use update-alternatives below. -touch mk/build.mk -echo "NO_INSTALL_RUNHASKELL=YES" >>mk/build.mk -echo "NO_INSTALL_HSC2HS=YES" >>mk/build.mk - -make %{?jobs:-j%jobs} -make html -# Alas, we don't pass make options/arguments down to "libraries", so let's redo make here... -make -C libraries HADDOCK_DOCS=YES -( cd libraries/Cabal && docbook2html doc/Cabal.xml --output doc/Cabal ) -make -C docs/ext-core ps -make -C docs/storage-mgt ps - -%install -# This is a cruel hack: There seems to be no way to install the Haddock -# documentation into the build directory, because DESTDIR is alway prepended. -# Furthermore, rpm removes the target documentation directory before the doc -# macros are processed. Therefore we have to copy things back into safety... :-P -# The right thing would be being able to install directly into the build tree. -make DESTDIR=${RPM_BUILD_ROOT} docdir=%{_datadir}/doc/packages/%{name} HADDOCK_DOCS=YES install install-docs -mkdir html-docs -cp -a ${RPM_BUILD_ROOT}%{_datadir}/doc/packages/%{name}/{index.html,libraries} html-docs -# Use version-less hsc2hs out of the way, we use update-alternatives. -mv ${RPM_BUILD_ROOT}%{_prefix}/bin/hsc2hs ${RPM_BUILD_ROOT}%{_prefix}/bin/hsc2hs-ghc - -# generate the file list for lib/ _excluding_ all files needed for profiling -# only -# -# * generating file lists in a BUILD_ROOT spec is a bit tricky: the file list -# has to contain complete paths, _but_ without the BUILD_ROOT, we also do -# _not_ want have directory names in the list; furthermore, we have to make -# sure that any leading / is removed from %{_prefix}/lib, as find has to -# interpret the argument as a relative path; however, we have to include the -# leading / again in the final file list (otherwise, rpm complains) -# * isn't there an easier way to do all this? -# -dir=`pwd` -cd ${RPM_BUILD_ROOT} -libdir=`echo %{_prefix}/lib | sed 's|^/||'` -find $libdir ! -type d ! -name '*.p_hi' ! -name '*_p.a' -print | sed 's|^|/|' > $dir/rpm-noprof-lib-files -find $libdir ! 
-type d \( -name '*.p_hi' -or -name '*_p.a' \) -print | sed 's|^|/|' > $dir/rpm-prof-lib-files -cd $dir - -%clean -rm -rf ${RPM_BUILD_ROOT} - -%post -# Alas, GHC, Hugs and nhc all come with different set of tools in addition to -# a runFOO: -# -# * GHC: hsc2hs -# * Hugs: hsc2hs, cpphs -# * nhc: cpphs -# -# Therefore it is currently not possible to use --slave below to form link -# groups under a single name 'runhaskell'. Either these tools should be -# disentangled from the Haskell implementations or all implementations should -# have the same set of tools. *sigh* -update-alternatives --install %{_bindir}/runhaskell runhaskell %{_bindir}/runghc 500 -update-alternatives --install %{_bindir}/hsc2hs hsc2hs %{_bindir}/hsc2hs-ghc 500 - -%preun -if test "$1" = 0; then - update-alternatives --remove runhaskell %{_bindir}/runghc - update-alternatives --remove hsc2hs %{_bindir}/hsc2hs-ghc -fi - -%files -f rpm-noprof-lib-files -%defattr(-,root,root) -%doc docs/docbook-cheat-sheet/docbook-cheat-sheet -%doc ANNOUNCE -%doc LICENSE -%doc README -%doc docs/comm -%doc docs/ext-core/core.ps -%doc docs/storage-mgt/ldv.ps -%doc docs/storage-mgt/rp.ps -%doc docs/storage-mgt/sm.ps -%doc docs/users_guide/users_guide -%doc libraries/Cabal/doc/Cabal -%doc html-docs/* -%{_mandir}/man1/ghc.1* -%{_prefix}/bin/ghc -%{_prefix}/bin/ghc-%{version} -%{_prefix}/bin/ghc-pkg -%{_prefix}/bin/ghc-pkg-%{version} -%{_prefix}/bin/ghci -%{_prefix}/bin/ghci-%{version} -%{_prefix}/bin/ghcprof -%{_prefix}/bin/hp2ps -%{_prefix}/bin/hpc -%{_prefix}/bin/hsc2hs-ghc -%{_prefix}/bin/hsc2hs-%{version} -%{_prefix}/bin/runghc - -%files prof -f rpm-prof-lib-files -%defattr(-,root,root) diff --git a/ghc/GhciMonad.hs b/ghc/GhciMonad.hs index e61e1409dec5..a3fe632493a6 100644 --- a/ghc/GhciMonad.hs +++ b/ghc/GhciMonad.hs @@ -64,7 +64,7 @@ data GHCiState = GHCiState progname :: String, args :: [String], prompt :: String, - def_prompt :: String, + prompt2 :: String, editor :: String, stop :: String, options :: [GHCiOption], diff --git a/ghc/InteractiveUI.hs b/ghc/InteractiveUI.hs index 8fdf92142518..4ff822f03b98 100644 --- a/ghc/InteractiveUI.hs +++ b/ghc/InteractiveUI.hs @@ -109,7 +109,8 @@ data GhciSettings = GhciSettings { availableCommands :: [Command], shortHelpText :: String, fullHelpText :: String, - defPrompt :: String + defPrompt :: String, + defPrompt2 :: String } defaultGhciSettings :: GhciSettings @@ -118,7 +119,8 @@ defaultGhciSettings = availableCommands = ghciCommands, shortHelpText = defShortHelpText, fullHelpText = defFullHelpText, - defPrompt = default_prompt + defPrompt = default_prompt, + defPrompt2 = default_prompt2 } ghciWelcomeMsg :: String @@ -285,6 +287,7 @@ defFullHelpText = " :set args ... set the arguments returned by System.getArgs\n" ++ " :set prog set the value returned by System.getProgName\n" ++ " :set prompt set the prompt used in GHCi\n" ++ + " :set prompt2 set the continuation prompt used in GHCi\n" ++ " :set editor set the command used for :edit\n" ++ " :set stop [] set the command to run when a breakpoint is hit\n" ++ " :unset