auto-update-0.1.4/auto-update.cabal

name:                auto-update
version:             0.1.4
synopsis:            Efficiently run periodic, on-demand actions
description:         API docs and the README are available at .
homepage:            https://github.com/yesodweb/wai
license:             MIT
license-file:        LICENSE
author:              Michael Snoyman
maintainer:          michael@snoyman.com
category:            Control
build-type:          Simple
extra-source-files:  README.md ChangeLog.md
cabal-version:       >=1.10

library
  ghc-options:      -Wall
  exposed-modules:  Control.AutoUpdate
                    Control.Debounce
                    Control.Reaper
  other-modules:    Control.AutoUpdate.Util
  build-depends:    base >= 4 && < 5
  default-language: Haskell2010

-- Test suite is currently not robust enough, gives too many false negatives.
-- test-suite spec
--     main-is:            Spec.hs
--     other-modules:      Control.AutoUpdateSpec
--                         Control.ReaperSpec
--     hs-source-dirs:     test
--     type:               exitcode-stdio-1.0
--     build-depends:      base, auto-update, hspec
--     default-language:   Haskell2010

auto-update-0.1.4/ChangeLog.md

## 0.1.4

* Provide updateActionModify API in AutoUpdate [#547](https://github.com/yesodweb/wai/pull/547)

## 0.1.3.1

* Doc improvements

## 0.1.3

* Added a new API: reaperKill

## 0.1.2

* Added Control.Debounce

auto-update-0.1.4/LICENSE

Copyright (c) 2014 Michael Snoyman

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

auto-update-0.1.4/README.md

## auto-update

A common problem is the desire to have an action run at a scheduled interval,
but only if it is needed. For example, instead of having every web request
result in a new `getCurrentTime` call, we'd like to have a single worker thread
run every second, updating an `IORef`. However, if the request frequency is
less than once per second, this is a pessimization, and worse, kills idle GC.

This library allows you to define actions which will either be performed by a
dedicated thread or, in times of low volume, will be executed by the calling
thread.

For the original use case, see [yesod-scaffold issue #15](https://github.com/yesodweb/yesod-scaffold/pull/15).
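
A minimal, self-contained sketch of the API (assuming the `time` package is available; everything other than the `Control.AutoUpdate` names is illustrative):

```haskell
import Control.AutoUpdate (defaultUpdateSettings, mkAutoUpdate, updateAction)
import Data.Time (getCurrentTime)

main :: IO ()
main = do
    -- The returned action reads a cached value. A dedicated worker refreshes
    -- it at most once per updateFreq (one second by default); under low
    -- volume the update instead runs on demand.
    getTime <- mkAutoUpdate defaultUpdateSettings { updateAction = getCurrentTime }
    getTime >>= print
    getTime >>= print
```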
auto-update-0.1.4/Setup.hs

import Distribution.Simple
main = defaultMain

auto-update-0.1.4/Control/AutoUpdate.hs

{-# LANGUAGE CPP #-}

-- | In a multithreaded environment, running actions on a regularly scheduled
-- background thread can dramatically improve performance.
-- For example, web servers need to return the current time with each HTTP response.
-- For a high-volume server, it's much faster for a dedicated thread to run every
-- second, and write the current time to a shared 'IORef', than it is for each
-- request to make its own call to 'getCurrentTime'.
--
-- But for a low-volume server, whose request frequency is less than once per
-- second, that approach will result in /more/ calls to 'getCurrentTime' than
-- necessary, and worse, kills idle GC.
--
-- This library solves that problem by allowing you to define actions which will
-- either be performed by a dedicated thread, or, in times of low volume, will
-- be executed by the calling thread.
--
-- Example usage:
--
-- @
-- import "Data.Time"
-- import "Control.AutoUpdate"
--
-- getTime <- 'mkAutoUpdate' 'defaultUpdateSettings'
--     { 'updateAction' = 'Data.Time.Clock.getCurrentTime'
--     , 'updateFreq' = 1000000 -- The default frequency, once per second
--     }
-- currentTime <- getTime
-- @
--
-- For more examples, .
module Control.AutoUpdate (
      -- * Type
      UpdateSettings
    , defaultUpdateSettings
      -- * Accessors
    , updateAction
    , updateFreq
    , updateSpawnThreshold
      -- * Creation
    , mkAutoUpdate
    , mkAutoUpdateWithModify
    ) where

#if __GLASGOW_HASKELL__ < 709
import Control.Applicative ((<*>))
#endif
import Control.Concurrent (forkIO, threadDelay)
import Control.Concurrent.MVar (newEmptyMVar, putMVar, readMVar, takeMVar, tryPutMVar)
import Control.Exception (SomeException, catch, mask_, throw, try)
import Control.Monad (void)
import Data.IORef (newIORef, readIORef, writeIORef)

-- | Default value for creating an 'UpdateSettings'.
--
-- @since 0.1.0
defaultUpdateSettings :: UpdateSettings ()
defaultUpdateSettings = UpdateSettings
    { updateFreq = 1000000
    , updateSpawnThreshold = 3
    , updateAction = return ()
    }

-- | Settings to control how values are updated.
--
-- This should be constructed using 'defaultUpdateSettings' and record
-- update syntax, e.g.:
--
-- @
-- let settings = 'defaultUpdateSettings' { 'updateAction' = 'Data.Time.Clock.getCurrentTime' }
-- @
--
-- @since 0.1.0
data UpdateSettings a = UpdateSettings
    { updateFreq :: Int
      -- ^ Microseconds between update calls. Same considerations as
      -- 'threadDelay' apply.
      --
      -- Default: 1 second (1000000)
      --
      -- @since 0.1.0
    , updateSpawnThreshold :: Int
      -- ^ NOTE: This value no longer has any effect, since worker threads are
      -- dedicated instead of spawned on demand.
      --
      -- Previously, this determined how many times the data must be requested
      -- before we decide to spawn a dedicated thread.
      --
      -- Default: 3
      --
      -- @since 0.1.0
    , updateAction :: IO a
      -- ^ Action to be performed to get the current value.
      --
      -- Default: does nothing.
      --
      -- @since 0.1.0
    }

-- | Generate an action which will either read from an automatically
-- updated value, or run the update action in the current thread.
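--
-- The returned action is intended to be called from many threads: each call
-- either returns the cached value or blocks until a fresh one is available.
-- A small sketch of the calling pattern (the @getCurrentTime@ action is only
-- an illustration):
--
-- @
-- getTime <- 'mkAutoUpdate' 'defaultUpdateSettings' { 'updateAction' = getCurrentTime }
-- t1 <- getTime  -- triggers 'updateAction' and caches the result
-- t2 <- getTime  -- served from the cache while the value is still fresh
-- @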
--
-- @since 0.1.0
mkAutoUpdate :: UpdateSettings a -> IO (IO a)
mkAutoUpdate us = mkAutoUpdateHelper us Nothing

-- | Generate an action which will either read from an automatically
-- updated value, or run the update action in the current thread: the full
-- update action the first time, and the provided modify action (applied to
-- the previous value) after that.
--
-- @since 0.1.4
mkAutoUpdateWithModify :: UpdateSettings a -> (a -> IO a) -> IO (IO a)
mkAutoUpdateWithModify us f = mkAutoUpdateHelper us (Just f)

mkAutoUpdateHelper :: UpdateSettings a -> Maybe (a -> IO a) -> IO (IO a)
mkAutoUpdateHelper us updateActionModify = do
    -- A baton to tell the worker thread to generate a new value.
    needsRunning <- newEmptyMVar

    -- The initial response variable. Response variables allow the requesting
    -- thread to block until a value is generated by the worker thread.
    responseVar0 <- newEmptyMVar

    -- The current value, if available. We start off with a Left value
    -- indicating no value is available, and the above-created responseVar0 to
    -- give a variable to block on.
    currRef <- newIORef $ Left responseVar0

    -- This is used to set a value in the currRef variable when the worker
    -- thread exits. In reality, that value should never be used, since the
    -- worker thread exiting only occurs if an async exception is thrown, which
    -- should only occur if there are no references to needsRunning left.
    -- However, this handler will make error messages much clearer if there's a
    -- bug in the implementation.
    let fillRefOnExit f = do
            eres <- try f
            case eres of
                Left e -> writeIORef currRef $ error $
                    "Control.AutoUpdate.mkAutoUpdate: worker thread exited with exception: "
                    ++ show (e :: SomeException)
                Right () -> writeIORef currRef $ error $
                    "Control.AutoUpdate.mkAutoUpdate: worker thread exited normally, "
                    ++ "which should be impossible due to usage of infinite loop"

    -- Fork the worker thread immediately. Note that we mask async exceptions,
    -- but *not* in an uninterruptible manner. This will allow a
    -- BlockedIndefinitelyOnMVar exception to still be thrown, which will take
    -- down this thread when all references to the returned function are
    -- garbage collected, and therefore there is no thread that can fill the
    -- needsRunning MVar.
    --
    -- Note that since we throw away the ThreadId of this new thread and never
    -- call myThreadId, normal async exceptions can never be thrown to it,
    -- only RTS exceptions.
    mask_ $ void $ forkIO $ fillRefOnExit $ do
        -- This infinite loop makes up our worker thread. It takes a
        -- responseVar value where the next value should be putMVar'ed to for
        -- the benefit of any requesters currently blocked on it.
        let loop responseVar maybea = do
                -- block until a value is actually needed
                takeMVar needsRunning

                -- new value requested, so run the updateAction
                a <- catchSome $ maybe (updateAction us) id (updateActionModify <*> maybea)

                -- we got a new value, update currRef and lastValue
                writeIORef currRef $ Right a
                putMVar responseVar a

                -- delay until we're needed again
                threadDelay $ updateFreq us

                -- delay's over. create a new response variable and set currRef
                -- to use it, so that the next requester will block on that
                -- variable. Then loop again with the updated response
                -- variable.
                responseVar' <- newEmptyMVar
                writeIORef currRef $ Left responseVar'
                loop responseVar' (Just a)

        -- Kick off the loop, with the initial responseVar0 variable.
        loop responseVar0 Nothing

    return $ do
        mval <- readIORef currRef
        case mval of
            Left responseVar -> do
                -- no current value, force the worker thread to run...
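                -- tryPutMVar (rather than putMVar) keeps this non-blocking:
                -- if another requester has already signalled the worker, the
                -- baton is full and there is nothing more to do here.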
                void $ tryPutMVar needsRunning ()

                -- and block for the result from the worker
                readMVar responseVar
            -- we have a current value, use it
            Right val -> return val

-- | Turn a runtime exception into an impure exception, so that all 'IO'
-- actions will complete successfully. This simply defers the exception until
-- the value is forced.
catchSome :: IO a -> IO a
catchSome act = Control.Exception.catch act $ \e ->
    return $ throw (e :: SomeException)

auto-update-0.1.4/Control/Debounce.hs

{-# LANGUAGE ScopedTypeVariables #-}

-- | Debounce an action, ensuring it doesn't occur more than once for a given
-- period of time.
--
-- This is useful as an optimization, for example to ensure that logs are only
-- flushed to disk at most once per second.
--
-- Example usage:
--
-- @
-- printString <- 'mkDebounce' 'defaultDebounceSettings'
--     { 'debounceAction' = putStrLn "Running action"
--     , 'debounceFreq' = 5000000 -- 5 seconds
--     }
-- @
--
-- >>> printString
-- Running action
-- >>> printString
--
-- Running action
--
-- See the fast-logger package ("System.Log.FastLogger") for real-world usage.
--
-- @since 0.1.2
module Control.Debounce (
      -- * Type
      DebounceSettings
    , defaultDebounceSettings
      -- * Accessors
    , debounceFreq
    , debounceAction
      -- * Creation
    , mkDebounce
    ) where

import Control.Concurrent (forkIO, threadDelay)
import Control.Concurrent.MVar (newEmptyMVar, takeMVar, tryPutMVar)
import Control.Exception (SomeException, handle, mask_)
import Control.Monad (forever, void)

-- | Settings to control how debouncing should work.
--
-- This should be constructed using 'defaultDebounceSettings' and record
-- update syntax, e.g.:
--
-- @
-- let settings = 'defaultDebounceSettings' { 'debounceAction' = flushLog }
-- @
--
-- @since 0.1.2
data DebounceSettings = DebounceSettings
    { debounceFreq :: Int
      -- ^ Microseconds lag required between subsequent calls to the debounced
      -- action.
      --
      -- Default: 1 second (1000000)
      --
      -- @since 0.1.2
    , debounceAction :: IO ()
      -- ^ Action to be performed.
      --
      -- Note: all exceptions thrown by this action will be silently discarded.
      --
      -- Default: does nothing.
      --
      -- @since 0.1.2
    }

-- | Default value for creating a 'DebounceSettings'.
--
-- @since 0.1.2
defaultDebounceSettings :: DebounceSettings
defaultDebounceSettings = DebounceSettings
    { debounceFreq = 1000000
    , debounceAction = return ()
    }

-- | Generate an action which will trigger the debounced action to be
-- performed. The action will either be performed immediately, or after the
-- current cooldown period has expired.
--
-- @since 0.1.2
mkDebounce :: DebounceSettings -> IO (IO ())
mkDebounce (DebounceSettings freq action) = do
    baton <- newEmptyMVar
    mask_ $ void $ forkIO $ forever $ do
        takeMVar baton
        ignoreExc action
        threadDelay freq
    return $ void $ tryPutMVar baton ()

ignoreExc :: IO () -> IO ()
ignoreExc = handle $ \(_ :: SomeException) -> return ()

auto-update-0.1.4/Control/Reaper.hs

{-# LANGUAGE RecordWildCards #-}
{-# LANGUAGE BangPatterns #-}

-- | This module provides the ability to create reapers: dedicated cleanup
-- threads. These threads will automatically spawn and die based on the
-- presence of a workload to process. Example uses include:
--
-- * Killing long-running jobs
-- * Closing unused connections in a connection pool
-- * Pruning a cache of old items (see example below)
--
-- For real-world usage, search the
-- for imports of "Control.Reaper".
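--
-- A compressed sketch of the add/cleanup cycle (@closeIfIdle@ and @conn@
-- below are hypothetical; only the 'Control.Reaper' names are real):
--
-- @
-- reaper <- 'mkReaper' 'defaultReaperSettings'
--     { 'reaperAction' = 'mkListAction' closeIfIdle }
-- 'reaperAdd' reaper conn  -- register an item; the reaper thread is spawned on demand
-- @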
module Control.Reaper (
      -- * Example: Regularly cleaning a cache
      -- $example1

      -- * Settings
      ReaperSettings
    , defaultReaperSettings
      -- * Accessors
    , reaperAction
    , reaperDelay
    , reaperCons
    , reaperNull
    , reaperEmpty
      -- * Type
    , Reaper(..)
      -- * Creation
    , mkReaper
      -- * Helper
    , mkListAction
    ) where

import Control.AutoUpdate.Util (atomicModifyIORef')
import Control.Concurrent (forkIO, threadDelay, killThread, ThreadId)
import Control.Exception (mask_)
import Data.IORef (IORef, newIORef, readIORef, writeIORef)

-- | Settings for creating a reaper. This type has two parameters:
-- @workload@ gives the entire workload, whereas @item@ gives an
-- individual piece of the queue. A common approach is to have @workload@
-- be a list of @item@s. This is encouraged by 'defaultReaperSettings' and
-- 'mkListAction'.
--
-- @since 0.1.1
data ReaperSettings workload item = ReaperSettings
    { reaperAction :: workload -> IO (workload -> workload)
      -- ^ The action to perform on a workload. The result of this is a
      -- \"workload modifying\" function. In the common case of using lists,
      -- the result should be a difference list that prepends the remaining
      -- workload to the temporary workload. For help with setting up such
      -- an action, see 'mkListAction'.
      --
      -- Default: do nothing with the workload, and then prepend it to the
      -- temporary workload. This is incredibly useless; you should
      -- definitely override this default.
      --
      -- @since 0.1.1
    , reaperDelay :: {-# UNPACK #-} !Int
      -- ^ Number of microseconds to delay between calls of 'reaperAction'.
      --
      -- Default: 30 seconds.
      --
      -- @since 0.1.1
    , reaperCons :: item -> workload -> workload
      -- ^ Add an item onto a workload.
      --
      -- Default: list consing.
      --
      -- @since 0.1.1
    , reaperNull :: workload -> Bool
      -- ^ Check if a workload is empty, in which case the worker thread
      -- will shut down.
      --
      -- Default: 'null'.
      --
      -- @since 0.1.1
    , reaperEmpty :: workload
      -- ^ An empty workload.
      --
      -- Default: empty list.
      --
      -- @since 0.1.1
    }

-- | Default @ReaperSettings@ value, biased towards having a list of work
-- items.
--
-- @since 0.1.1
defaultReaperSettings :: ReaperSettings [item] item
defaultReaperSettings = ReaperSettings
    { reaperAction = \wl -> return (wl ++)
    , reaperDelay = 30000000
    , reaperCons = (:)
    , reaperNull = null
    , reaperEmpty = []
    }

-- | A data structure to hold reaper APIs.
data Reaper workload item = Reaper {
      -- | Adding an item to the workload
      reaperAdd  :: item -> IO ()
      -- | Reading workload.
    , reaperRead :: IO workload
      -- | Stopping the reaper thread if it exists.
      --   The current workload is returned.
    , reaperStop :: IO workload
      -- | Killing the reaper thread immediately if it exists.
    , reaperKill :: IO ()
    }

-- | State of reaper.
data State workload = NoReaper           -- ^ No reaper thread
                    | Workload workload  -- ^ The current jobs

-- | Create a reaper addition function. This function can be used to add
-- new items to the workload. Spawning of reaper threads will be handled
-- for you automatically.
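--
-- A sketch of typical usage with the list-based defaults (@isExpired@ is a
-- hypothetical predicate on a work item):
--
-- @
-- reaper <- 'mkReaper' 'defaultReaperSettings'
--     { 'reaperAction' = 'mkListAction' (\\x -> do
--         expired <- isExpired x
--         return (if expired then Nothing else Just x))
--     }
-- @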
--
-- @since 0.1.1
mkReaper :: ReaperSettings workload item -> IO (Reaper workload item)
mkReaper settings@ReaperSettings{..} = do
    stateRef <- newIORef NoReaper
    tidRef   <- newIORef Nothing
    return Reaper {
        reaperAdd  = add settings stateRef tidRef
      , reaperRead = readRef stateRef
      , reaperStop = stop stateRef
      , reaperKill = kill tidRef
      }
  where
    readRef stateRef = do
        mx <- readIORef stateRef
        case mx of
            NoReaper    -> return reaperEmpty
            Workload wl -> return wl
    stop stateRef = atomicModifyIORef' stateRef $ \mx ->
        case mx of
            NoReaper   -> (NoReaper, reaperEmpty)
            Workload x -> (Workload reaperEmpty, x)
    kill tidRef = do
        mtid <- readIORef tidRef
        case mtid of
            Nothing  -> return ()
            Just tid -> killThread tid

add :: ReaperSettings workload item
    -> IORef (State workload) -> IORef (Maybe ThreadId)
    -> item -> IO ()
add settings@ReaperSettings{..} stateRef tidRef item =
    mask_ $ do
        next <- atomicModifyIORef' stateRef cons
        next
  where
    cons NoReaper      = let !wl = reaperCons item reaperEmpty
                         in (Workload wl, spawn settings stateRef tidRef)
    cons (Workload wl) = let wl' = reaperCons item wl
                         in (Workload wl', return ())

spawn :: ReaperSettings workload item
      -> IORef (State workload) -> IORef (Maybe ThreadId)
      -> IO ()
spawn settings stateRef tidRef = do
    tid <- forkIO $ reaper settings stateRef tidRef
    writeIORef tidRef $ Just tid

reaper :: ReaperSettings workload item
       -> IORef (State workload) -> IORef (Maybe ThreadId)
       -> IO ()
reaper settings@ReaperSettings{..} stateRef tidRef = do
    threadDelay reaperDelay
    -- Get the current jobs, and put an empty workload back in the reference.
    wl <- atomicModifyIORef' stateRef swapWithEmpty
    -- Do the jobs. A function to merge the leftover jobs and
    -- any new jobs is returned.
    !merge <- reaperAction wl
    -- Merge the leftover jobs and the new jobs.
    -- If there are no jobs, this thread finishes.
    next <- atomicModifyIORef' stateRef (check merge)
    next
  where
    swapWithEmpty NoReaper      = error "Control.Reaper.reaper: unexpected NoReaper (1)"
    swapWithEmpty (Workload wl) = (Workload reaperEmpty, wl)
    check _ NoReaper = error "Control.Reaper.reaper: unexpected NoReaper (2)"
    check merge (Workload wl)
      -- If there is no job left, the reaper is terminated.
      | reaperNull wl' = (NoReaper, writeIORef tidRef Nothing)
      -- If there are jobs, carry them out.
      | otherwise      = (Workload wl', reaper settings stateRef tidRef)
      where
        wl' = merge wl

-- | A helper function for creating 'reaperAction' functions. You would
-- provide this function with a function to process a single work item and
-- return either a new work item, or @Nothing@ if the work item is
-- expired.
--
-- @since 0.1.1
mkListAction :: (item -> IO (Maybe item'))
             -> [item]
             -> IO ([item'] -> [item'])
mkListAction f =
    go id
  where
    go !front [] = return front
    go !front (x:xs) = do
        my <- f x
        let front' =
                case my of
                    Nothing -> front
                    Just y  -> front . (y:)
        go front' xs

-- $example1
-- In this example code, we use a 'Data.Map.Strict.Map' to cache Fibonacci numbers, and a 'Reaper' to prune the cache.
--
-- The @main@ function first creates a 'Reaper', with fields to initialize the
-- cache ('reaperEmpty'), add items to it ('reaperCons'), and prune it ('reaperAction').
-- The reaper will run every two seconds ('reaperDelay'), but will stop running while
-- 'reaperNull' is true.
--
-- @main@ then loops infinitely ('Control.Monad.forever').
-- Each second it calculates the Fibonacci number
-- for a value between 30 and 34, first trying the cache ('reaperRead' and 'Data.Map.Strict.lookup'),
-- then falling back to manually calculating it (@fib@)
-- and updating the cache with the result ('reaperAdd').
--
-- @clean@ simply removes items cached for more than 10 seconds.
-- This function is where you would perform IO-related cleanup,
-- like killing threads or closing connections, if that was the purpose of your reaper.
--
-- @
-- module Main where
--
-- import "Data.Time" (UTCTime, getCurrentTime, diffUTCTime)
-- import "Control.Reaper"
-- import "Control.Concurrent" (threadDelay)
-- import "Data.Map.Strict" (Map)
-- import qualified "Data.Map.Strict" as Map
-- import "Control.Monad" (forever)
-- import "System.Random" (getStdRandom, randomR)
--
-- fib :: 'Int' -> 'Int'
-- fib 0 = 0
-- fib 1 = 1
-- fib n = fib (n-1) + fib (n-2)
--
-- type Cache = 'Data.Map.Strict.Map' 'Int' ('Int', 'Data.Time.Clock.UTCTime')
--
-- main :: IO ()
-- main = do
--     reaper <- 'mkReaper' 'defaultReaperSettings'
--         { 'reaperEmpty'  = Map.'Data.Map.Strict.empty'
--         , 'reaperCons'   = \\(k, v, time) workload -> Map.'Data.Map.Strict.insert' k (v, time) workload
--         , 'reaperAction' = clean
--         , 'reaperDelay'  = 1000000 * 2 -- Clean every 2 seconds
--         , 'reaperNull'   = Map.'Data.Map.Strict.null'
--         }
--     forever $ do
--         fibArg <- 'System.Random.getStdRandom' ('System.Random.randomR' (30,34))
--         cache <- 'reaperRead' reaper
--         let cachedResult = Map.'Data.Map.Strict.lookup' fibArg cache
--         case cachedResult of
--             'Just' (fibResult, _createdAt) -> 'putStrLn' $ "Found in cache: `fib " ++ 'show' fibArg ++ "` " ++ 'show' fibResult
--             'Nothing' -> do
--                 let fibResult = fib fibArg
--                 'putStrLn' $ "Calculating `fib " ++ 'show' fibArg ++ "` " ++ 'show' fibResult
--                 time <- 'Data.Time.Clock.getCurrentTime'
--                 ('reaperAdd' reaper) (fibArg, fibResult, time)
--         'threadDelay' 1000000 -- 1 second
--
-- -- Remove items > 10 seconds old
-- clean :: Cache -> IO (Cache -> Cache)
-- clean oldMap = do
--     currentTime <- 'Data.Time.Clock.getCurrentTime'
--     let pruned = Map.'Data.Map.Strict.filter' (\\(_, createdAt) -> currentTime \`diffUTCTime\` createdAt < 10.0) oldMap
--     return (\\newData -> Map.'Data.Map.Strict.union' pruned newData)
-- @

auto-update-0.1.4/Control/AutoUpdate/Util.hs

{-# LANGUAGE CPP #-}
module Control.AutoUpdate.Util (
      atomicModifyIORef'
    ) where

#ifndef MIN_VERSION_base
#define MIN_VERSION_base(x,y,z) 1
#endif

#if MIN_VERSION_base(4,6,0)
import Data.IORef (atomicModifyIORef')
#else
import Data.IORef (IORef, atomicModifyIORef)

-- | Strict version of 'atomicModifyIORef'. This forces both the value stored
-- in the 'IORef' as well as the value returned.
atomicModifyIORef' :: IORef a -> (a -> (a,b)) -> IO b
atomicModifyIORef' ref f = do
    c <- atomicModifyIORef ref
            (\x -> let (a, b) = f x     -- Lazy application of "f"
                    in (a, a `seq` b))  -- Lazy application of "seq"
    -- The following forces "a `seq` b", so it also forces "f x".
    c `seq` return c
#endif