-- Hoogle documentation, generated by Haddock
-- See Hoogle, http://www.haskell.org/hoogle/


-- | A reliable at-least-once job queue built on top of Redis.
--   
--   See README.
@package hworker
@version 0.1.0.1


-- | This module contains an at-least-once persistent job-processing queue
--   backed by Redis. It depends upon Redis not losing data once it has
--   acknowledged it, and upon Redis guaranteeing the atomicity that is
--   specified for commands like EVAL (i.e., that if you do several things
--   within an EVAL, either all of them happen or none of them do).
--   Nothing has been tested with Redis clusters (and it likely will not
--   work with them).
--   
--   An example use is the following (see the repository for a slightly
--   expanded version; the test cases in the repository are also good
--   examples):
--   
--   <pre>
--   {-# LANGUAGE DeriveGeneric, OverloadedStrings #-}
--   
--   import Control.Concurrent (MVar, forkIO, newMVar, putMVar, takeMVar,
--                              threadDelay)
--   import Control.Monad      (forever)
--   import Data.Aeson         (FromJSON, ToJSON)
--   import GHC.Generics       (Generic)
--   import System.Hworker
--   
--   data PrintJob = Print deriving (Generic, Show)
--   data State = State (MVar Int)
--   instance ToJSON PrintJob
--   instance FromJSON PrintJob
--   
--   instance Job State PrintJob where
--     job (State mvar) Print =
--       do v &lt;- takeMVar mvar
--          putMVar mvar (v + 1)
--          putStrLn $ "A(" ++ show v ++ ")"
--          return Success
--   
--   main = do mvar &lt;- newMVar 0
--             hworker &lt;- create "printer" (State mvar)
--             forkIO (worker hworker)
--             forkIO (monitor hworker)
--             forkIO (forever $ queue hworker Print &gt;&gt; threadDelay 1000000)
--             forever (threadDelay 1000000)
--   </pre>
module System.Hworker

-- | Jobs can return <a>Success</a>, <a>Retry</a> (with a message), or
--   <a>Failure</a> (with a message). Jobs that return <a>Failure</a> are
--   stored in the <a>failed</a> queue and are not re-run. Jobs that return
--   <a>Retry</a> are re-run.
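--   
--   For example, a job might return <a>Retry</a> for a transient error
--   and <a>Failure</a> otherwise. A minimal sketch (the <tt>Fetch</tt>
--   constructor and <tt>fetchUrl</tt> helper are hypothetical, not part
--   of this package):
--   
--   <pre>
--   job (State _) (Fetch url) = do
--     res &lt;- try (fetchUrl url)  -- fetchUrl is a hypothetical helper
--     case res of
--       Right _ -&gt; return Success
--       -- T is Data.Text, imported qualified
--       Left e  -&gt; return (Retry (T.pack (show (e :: IOException))))
--   </pre>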
data Result
Success :: Result
Retry :: Text -> Result
Failure :: Text -> Result

-- | Each Worker that you create will be responsible for one type of job,
--   defined by a <a>Job</a> instance.
--   
--   The job can do many different things (as the value can be a variant),
--   but be careful not to break deserialization if you add new things it
--   can do.
--   
--   The job will take some state (passed as the <tt>s</tt> parameter),
--   which does not vary based on the job, and the actual job data
--   structure. The data structure (the <tt>t</tt> parameter) will be
--   stored and copied a few times in Redis during its lifecycle, so
--   generally it is a good idea to keep it relatively small (and have it
--   be able to look up any data it needs while the job is running).
--   
--   Finally, while deriving FromJSON and ToJSON instances automatically
--   might seem like a good idea, you will most likely be better off
--   defining them manually, so you can make sure they are backwards
--   compatible if you change them, as any jobs that can't be deserialized
--   will not be run (and will end up in the <a>broken</a> queue). This
--   can only happen if the queue is non-empty when you replace the
--   running version of the application, but that is entirely possible,
--   and may even be likely, depending on your use.
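--   
--   A minimal sketch of such hand-written instances for the
--   <tt>PrintJob</tt> type from the example above (assuming
--   OverloadedStrings):
--   
--   <pre>
--   instance ToJSON PrintJob where
--     toJSON Print = String "print"
--   
--   instance FromJSON PrintJob where
--     parseJSON (String "print") = return Print
--     parseJSON _                = fail "unknown PrintJob"
--   </pre>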
class (FromJSON t, ToJSON t, Show t) => Job s t | s -> t
job :: Job s t => s -> t -> IO Result

-- | The worker data type - it is parameterized by the worker state (the
--   <tt>s</tt>) and the job type (the <tt>t</tt>).
data Hworker s t

-- | The main configuration for workers.
--   
--   Each pool of workers should have a unique <a>hwconfigName</a>, as the
--   queues are set up by that name, and if different types of data are
--   written to the same queues, they will likely fail to deserialize (and
--   thus could end up in the <a>broken</a> queue).
--   
--   The <a>hwconfigLogger</a> defaults to writing to stdout, so you will
--   likely want to replace that with something appropriate (like from a
--   logging package).
--   
--   The <a>hwconfigTimeout</a> is really important. It determines the
--   length of time after a job is started before the <a>monitor</a> will
--   decide that the job must have died and will restart it. If it is
--   shorter than the length of time that a normal job takes to complete,
--   jobs <i>will</i> be run multiple times. This is <i>semantically</i>
--   okay, as this is an at-least-once processor, but it is obviously not
--   desirable. It defaults to 120 seconds.
--   
--   The <a>hwconfigExceptionBehavior</a> controls what happens when an
--   exception is thrown within a job.
--   
--   <a>hwconfigFailedQueueSize</a> controls how many <a>failed</a> jobs
--   will be kept. It defaults to 1000.
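--   
--   A minimal sketch of overriding some of these defaults, building on
--   the <tt>PrintJob</tt> example above (the field values are
--   illustrative only):
--   
--   <pre>
--   main = do
--     mvar &lt;- newMVar 0
--     let cfg = (defaultHworkerConfig "printer" (State mvar))
--                 { hwconfigTimeout         = 600   -- seconds
--                 , hwconfigFailedQueueSize = 5000
--                 }
--     hworker &lt;- createWith cfg
--     forkIO (worker hworker)
--     forkIO (monitor hworker)
--     forever (threadDelay 1000000)
--   </pre>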
data HworkerConfig s
HworkerConfig :: Text -> s -> RedisConnection -> ExceptionBehavior -> (forall a. Show a => a -> IO ()) -> NominalDiffTime -> Int -> Bool -> HworkerConfig s
[hwconfigName] :: HworkerConfig s -> Text
[hwconfigState] :: HworkerConfig s -> s
[hwconfigRedisConnectInfo] :: HworkerConfig s -> RedisConnection
[hwconfigExceptionBehavior] :: HworkerConfig s -> ExceptionBehavior
[hwconfigLogger] :: HworkerConfig s -> forall a. Show a => a -> IO ()
[hwconfigTimeout] :: HworkerConfig s -> NominalDiffTime
[hwconfigFailedQueueSize] :: HworkerConfig s -> Int
[hwconfigDebug] :: HworkerConfig s -> Bool

-- | What should happen when an unexpected exception is thrown in a job -
--   it can be treated as either a <a>Failure</a> (the default) or a
--   <a>Retry</a> (if you know the only exceptions are triggered by
--   intermittent problems).
data ExceptionBehavior
RetryOnException :: ExceptionBehavior
FailOnException :: ExceptionBehavior

-- | When configuring a worker, you can tell it to use an existing Redis
--   connection pool (which you may have for the rest of your
--   application). Otherwise, you can specify connection info. By default,
--   hworker tries to connect to localhost, which is likely not what you
--   want in a production application.
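--   
--   A minimal sketch of both options, using hedis's
--   <tt>defaultConnectInfo</tt> (the host name is illustrative):
--   
--   <pre>
--   fromInfo :: RedisConnection
--   fromInfo = RedisConnectInfo
--                (defaultConnectInfo { connectHost = "redis.internal" })
--   
--   fromExisting :: Connection -&gt; RedisConnection
--   fromExisting = RedisConnection
--   </pre>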
data RedisConnection
RedisConnectInfo :: ConnectInfo -> RedisConnection
RedisConnection :: Connection -> RedisConnection

-- | The default worker config - it needs a name and a state (as those
--   will always be specific to your application).
defaultHworkerConfig :: Text -> s -> HworkerConfig s

-- | Create a new worker with the default <a>HworkerConfig</a>.
--   
--   Note that you must create at least one <a>worker</a> and
--   <a>monitor</a> for the queue to actually process jobs (and for it to
--   retry ones that time out).
create :: Job s t => Text -> s -> IO (Hworker s t)

-- | Create a new worker with a specified <a>HworkerConfig</a>.
--   
--   Note that you must create at least one <a>worker</a> and
--   <a>monitor</a> for the queue to actually process jobs (and for it to
--   retry ones that time out).
createWith :: Job s t => HworkerConfig s -> IO (Hworker s t)

-- | Destroy a worker. This will delete all the queues, clearing out all
--   existing <a>jobs</a> as well as the <a>broken</a> and <a>failed</a>
--   queues. There is no need to do this in normal applications (and most
--   likely, you won't want to).
destroy :: Job s t => Hworker s t -> IO ()

-- | Creates a new worker thread. This is blocking, so you will want to
--   <a>forkIO</a> this into a thread. You can have any number of these
--   (and on any number of servers); the more there are, the faster jobs
--   will be processed.
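--   
--   For example, to run several workers in one process (a minimal
--   sketch; the <tt>startWorkers</tt> name is illustrative):
--   
--   <pre>
--   startWorkers :: Job s t =&gt; Int -&gt; Hworker s t -&gt; IO ()
--   startWorkers n hw = replicateM_ n (forkIO (worker hw))
--   </pre>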
worker :: Job s t => Hworker s t -> IO ()

-- | Start a monitor. Like <a>worker</a>, this is blocking, so should be
--   started in a thread. This is responsible for retrying jobs that time
--   out (which can happen if the processing thread is killed, for
--   example). You need to have at least one of these running to have the
--   retry happen, but it is safe to have any number running.
monitor :: Job s t => Hworker s t -> IO ()

-- | Adds a job to the queue. Returns whether the operation succeeded.
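--   
--   A minimal sketch of checking the result (the <tt>enqueueOrWarn</tt>
--   name is illustrative):
--   
--   <pre>
--   enqueueOrWarn :: Job s t =&gt; Hworker s t -&gt; t -&gt; IO ()
--   enqueueOrWarn hw j = do
--     ok &lt;- queue hw j
--     unless ok (putStrLn ("failed to queue: " ++ show j))
--   </pre>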
queue :: Job s t => Hworker s t -> t -> IO Bool

-- | Returns all pending jobs.
jobs :: Job s t => Hworker s t -> IO [t]

-- | Returns all failed jobs. This is capped at the most recent
--   <tt>hwconfigFailedQueueSize</tt> jobs that returned
--   <a>Failure</a> (or threw an exception when
--   <tt>hwconfigExceptionBehavior</tt> is <a>FailOnException</a>).
failed :: Job s t => Hworker s t -> IO [t]

-- | Returns the jobs that could not be deserialized, most likely because
--   you changed the 'ToJSON'/'FromJSON' instances for your job in a way
--   that prevented old jobs from being converted back from JSON. Another
--   (and much worse) way for jobs to end up here is if you point two
--   instances of <a>Hworker</a>, with different job types, at the same
--   queue (i.e., you re-use the name). Then, whenever a worker from one
--   queue picks up a job from the other, it will consider that job
--   broken.
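--   
--   A minimal sketch of inspecting this queue (both components of the
--   pairs have <tt>Show</tt> instances):
--   
--   <pre>
--   printBroken :: Hworker s t -&gt; IO ()
--   printBroken hw = broken hw &gt;&gt;= mapM_ print
--   </pre>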
broken :: Hworker s t -> IO [(ByteString, UTCTime)]

-- | Logs the contents of the job queue and the in-progress queue at
--   intervals of <tt>microseconds</tt> microseconds.
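--   
--   For example, to dump the queues once per second in a background
--   thread (a minimal sketch):
--   
--   <pre>
--   forkIO (debugger 1000000 hworker)
--   </pre>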
debugger :: Job s t => Int -> Hworker s t -> IO ()
instance GHC.Show.Show System.Hworker.Result
instance GHC.Generics.Generic System.Hworker.Result
instance Data.Aeson.Types.ToJSON.ToJSON System.Hworker.Result
instance Data.Aeson.Types.FromJSON.FromJSON System.Hworker.Result
