-- Hoogle documentation, generated by Haddock
-- See Hoogle, http://www.haskell.org/hoogle/


-- | A rich monadic API for working with leveldb databases.
--   
--   A rich monadic API for working with leveldb databases.
@package higher-leveldb
@version 0.5.0.1


-- | Higher LevelDB provides a rich monadic API for working with leveldb
--   (<a>http://code.google.com/p/leveldb</a>) databases. It uses the
--   leveldb-haskell bindings to the C++ library. The LevelDBT transformer
--   is a Reader that maintains a database context with the open database
--   as well as default read and write options. It also manages a concept
--   called a KeySpace, which is a bucket scheme that provides a low
--   (storage) overhead named identifier to segregate data. Finally it
--   wraps a <a>ResourceT</a> which is required for use of leveldb-haskell
--   functions.
--   
--   The other major feature is the scan function and its ScanQuery
--   structure that provides a map / fold abstraction over the Iterator
--   exposed by leveldb-haskell.
module Database.LevelDB.Higher
type Key = ByteString
type Value = ByteString

-- | The basic unit of storage is a Key/Value pair.
type Item = (Key, Value)

-- | A KeySpace is similar concept to a "bucket" in other libraries and
--   database systems. The ByteString for KeySpace can be arbitrarily long
--   without performance impact because the system maps the KeySpace name
--   to a 4-byte KeySpaceId internally which is prepended to each Key.
--   KeySpaces are cheap and plentiful and indeed with this library you
--   cannot escape them (you can supply an empty ByteString to use a
--   default KeySpace, but it is still used). One intended use case is to
--   use the full Key of a "parent" as the KeySpace of its children
--   (instance data in a time-series for example). This lets you scan over
--   a range-based key without passing over any unneeded items.
type KeySpace = ByteString
type KeySpaceId = ByteString

-- | Get a value from the current DB and KeySpace.
get :: (MonadLevelDB m) => Key -> m (Maybe Value)

-- | Put a value in the current DB and KeySpace.
put :: (MonadLevelDB m) => Key -> Value -> m ()

-- | Delete an entry from the current DB and KeySpace.
delete :: (MonadLevelDB m) => Key -> m ()

-- | Write a batch of operations - use the <a>putB</a> and
--   <a>deleteB</a> functions to add operations to the batch list.
runBatch :: (MonadLevelDB m) => WriterT WriteBatch m () -> m ()

-- | Add a <a>Put</a> operation to a WriteBatch -- for use with
--   <a>runBatch</a>.
putB :: (MonadLevelDB m) => Key -> Value -> WriterT WriteBatch m ()

-- | Add a <a>Del</a> operation to a WriteBatch -- for use with
--   <a>runBatch</a>.
deleteB :: (MonadLevelDB m) => Key -> WriterT WriteBatch m ()

-- | Scan the keyspace, applying functions and returning results. Look at
--   the documentation for <a>ScanQuery</a> for more information.
--   
--   This is essentially a fold left that will run until the
--   <a>scanWhile</a> condition is met or the iterator is exhausted. All
--   the results will be copied into memory before the function returns.
scan :: (MonadLevelDB m) => Key -> ScanQuery a b -> m b

-- | Structure containing functions used within the <a>scan</a> function.
--   You may want to start with one of the builder/helper functions such as
--   <a>queryItems</a>, which is defined as:
--   
--   <pre>
--   queryItems = queryBegins { scanInit = []
--                            , scanMap = id
--                            , scanFold = (:)
--                            }
--   </pre>
data ScanQuery a b
ScanQuery :: b -> (Key -> Item -> b -> Bool) -> (Item -> a) -> (Item -> Bool) -> (a -> b -> b) -> ScanQuery a b

-- | starting value for fold/reduce
[scanInit] :: ScanQuery a b -> b

-- | scan will continue until this returns false
[scanWhile] :: ScanQuery a b -> Key -> Item -> b -> Bool

-- | map or transform an item before it is reduced/accumulated
[scanMap] :: ScanQuery a b -> Item -> a

-- | filter function - return <a>False</a> to leave this <a>Item</a> out of
--   the result
[scanFilter] :: ScanQuery a b -> Item -> Bool

-- | accumulator/fold function e.g. (:)
[scanFold] :: ScanQuery a b -> a -> b -> b

-- | A basic ScanQuery helper; this query will find all keys that begin
--   with the Key argument supplied to scan, and returns them in a list of
--   <a>Item</a>.
--   
--   Does not require any function overrides.
queryItems :: ScanQuery Item [Item]

-- | a ScanQuery helper with defaults for queryBegins and a list result;
--   requires a map function e.g.:
--   
--   <pre>
--   scan "encoded-values:" queryList { scanMap = \(_, v) -&gt; decode v }
--   </pre>
queryList :: ScanQuery a [a]

-- | A partial ScanQuery helper; this query will find all keys that begin
--   with the Key argument supplied to scan.
--   
--   Requires a <a>scanInit</a>, a <a>scanMap</a> and a <a>scanFold</a>
--   function.
queryBegins :: ScanQuery a b

-- | a ScanQuery helper to count items beginning with Key argument.
queryCount :: (Num a) => ScanQuery a a

-- | Use a local keyspace for the operation. e.g.:
--   
--   <pre>
--   runCreateLevelDB "/tmp/mydb" "MyKeySpace" $ do
--      put "somekey" "somevalue"
--      withKeySpace "Other KeySpace" $ do
--          put "somekey" "someother value"
--      get "somekey"
--   
--   Just "somevalue"
--   </pre>
withKeySpace :: (MonadLevelDB m) => KeySpace -> m a -> m a

-- | Local Read/Write Options for the action.
withOptions :: (MonadLevelDB m) => RWOptions -> m a -> m a

-- | Run a block of get operations based on a single snapshot taken at the
--   beginning of the action. The snapshot will be automatically released
--   when complete.
--   
--   This means that you can do put operations in the same block, but you
--   will not see those changes inside this computation.
withSnapshot :: (MonadLevelDB m) => m a -> m a

-- | Fork a LevelDBT IO action and return ThreadId into the current monad.
--   This uses <a>resourceForkIO</a> to handle the reference counting and
--   cleanup resources when the last thread exits.
forkLevelDB :: (MonadLevelDB m) => LevelDB () -> m ThreadId

-- | MonadLevelDB class used by all the public functions in this module.
class (Monad m, MonadThrow m, MonadIO m, Applicative m, MonadResource m) => MonadLevelDB m

-- | Override context for an action - only usable internally for functions
--   like <a>withKeySpace</a> and <a>withOptions</a>.
withDBContext :: MonadLevelDB m => (DBContext -> DBContext) -> m a -> m a

-- | Lift a LevelDBT IO action into the current monad.
liftLevelDB :: MonadLevelDB m => LevelDBT IO a -> m a

-- | LevelDBT Transformer provides a context for database operations
--   provided in this module.
--   
--   This transformer has the same constraints as <a>ResourceT</a> as it
--   wraps <a>ResourceT</a> along with a <a>DBContext</a> <a>Reader</a>.
--   
--   If you aren't building a custom monad stack you can just use the
--   <a>LevelDB</a> alias.
data LevelDBT m a

-- | alias for LevelDBT IO - useful if you aren't building a custom stack.
type LevelDB a = LevelDBT IO a

-- | Map/transform the monad below the LevelDBT
mapLevelDBT :: (m a -> n b) -> LevelDBT m a -> LevelDBT n b

-- | Build a context and execute the actions; uses a <a>ResourceT</a>
--   internally.
--   
--   tip: you can use the Data.Default (def) method to specify default
--   options e.g.
--   
--   <pre>
--   runLevelDB "/tmp/mydb" def (def, def{sync = True}) "My Keyspace" $ do
--   </pre>
runLevelDB :: (MonadThrow m, MonadUnliftIO m) => FilePath -> Options -> RWOptions -> KeySpace -> LevelDBT m a -> m a

-- | Same as <a>runLevelDB</a> but doesn't call <a>runResourceT</a>. This
--   gives you the option to manage that yourself
runLevelDB' :: (MonadThrow m, MonadUnliftIO m) => FilePath -> Options -> RWOptions -> KeySpace -> LevelDBT m a -> ResourceT m a

-- | A helper for runLevelDB using default <a>Options</a> except
--   createIfMissing=True
runCreateLevelDB :: (MonadThrow m, MonadUnliftIO m) => FilePath -> KeySpace -> LevelDBT m a -> m a

-- | Unwrap a <a>ResourceT</a> transformer, and call all registered release
--   actions.
--   
--   Note that there is some reference counting involved due to
--   <a>resourceForkIO</a>. If multiple threads are sharing the same
--   collection of resources, only the last call to <tt>runResourceT</tt>
--   will deallocate the resources.
--   
--   <i>NOTE</i> Since version 1.2.0, this function will throw a
--   <a>ResourceCleanupException</a> if any of the cleanup functions throw
--   an exception.
runResourceT :: MonadUnliftIO m => ResourceT m a -> m a

-- | Options when opening a database
data Options :: *
Options :: !Int -> !Int -> !Int -> !Maybe Comparator -> !Compression -> !Bool -> !Bool -> !Int -> !Bool -> !Int -> !Maybe Either BloomFilter FilterPolicy -> Options

-- | Number of keys between restart points for delta encoding of keys.
--   
--   This parameter can be changed dynamically. Most clients should leave
--   this parameter alone.
--   
--   Default: 16
[blockRestartInterval] :: Options -> !Int

-- | Approximate size of user data packed per block.
--   
--   Note that the block size specified here corresponds to uncompressed
--   data. The actual size of the unit read from disk may be smaller if
--   compression is enabled.
--   
--   This parameter can be changed dynamically.
--   
--   Default: 4k
[blockSize] :: Options -> !Int

-- | Control over blocks (user data is stored in a set of blocks, and a
--   block is the unit of reading from disk).
--   
--   If &gt; 0, use the specified cache (in bytes) for blocks. If 0,
--   leveldb will automatically create and use an 8MB internal cache.
--   
--   Default: 0
[cacheSize] :: Options -> !Int

-- | Comparator used to define the order of keys in the table.
--   
--   If <a>Nothing</a>, the default comparator is used, which uses
--   lexicographic bytes-wise ordering.
--   
--   NOTE: the client must ensure that the comparator supplied here has the
--   same name and orders keys <i>exactly</i> the same as the comparator
--   provided to previous open calls on the same DB.
--   
--   Default: Nothing
[comparator] :: Options -> !Maybe Comparator

-- | Compress blocks using the specified compression algorithm.
--   
--   This parameter can be changed dynamically.
--   
--   Default: <a>Snappy</a>
[compression] :: Options -> !Compression

-- | If true, the database will be created if it is missing.
--   
--   Default: False
[createIfMissing] :: Options -> !Bool

-- | If true, an error is raised if the database already exists.
--   
--   Default: False
[errorIfExists] :: Options -> !Bool

-- | Number of open files that can be used by the DB.
--   
--   You may need to increase this if your database has a large working set
--   (budget one open file per 2MB of working set).
--   
--   Default: 1000
[maxOpenFiles] :: Options -> !Int

-- | If true, the implementation will do aggressive checking of the data it
--   is processing and will stop early if it detects any errors.
--   
--   This may have unforeseen ramifications: for example, a corruption of
--   one DB entry may cause a large number of entries to become unreadable
--   or for the entire DB to become unopenable.
--   
--   Default: False
[paranoidChecks] :: Options -> !Bool

-- | Amount of data to build up in memory (backed by an unsorted log on
--   disk) before converting to a sorted on-disk file.
--   
--   Larger values increase performance, especially during bulk loads. Up
--   to two write buffers may be held in memory at the same time, so you
--   may wish to adjust this parameter to control memory usage. Also, a
--   larger
--   write buffer will result in a longer recovery time the next time the
--   database is opened.
--   
--   Default: 4MB
[writeBufferSize] :: Options -> !Int
[filterPolicy] :: Options -> !Maybe Either BloomFilter FilterPolicy

-- | Options for read operations
data ReadOptions :: *
ReadOptions :: !Bool -> !Bool -> !Maybe Snapshot -> ReadOptions

-- | If true, all data read from underlying storage will be verified
--   against corresponding checksums.
--   
--   Default: False
[verifyCheckSums] :: ReadOptions -> !Bool

-- | Should the data read for this iteration be cached in memory? Callers
--   may wish to set this field to false for bulk scans.
--   
--   Default: True
[fillCache] :: ReadOptions -> !Bool

-- | If <a>Just</a>, read as of the supplied snapshot (which must belong to
--   the DB that is being read and which must not have been released). If
--   <a>Nothing</a>, use an implicit snapshot of the state at the beginning
--   of this read operation.
--   
--   Default: Nothing
[useSnapshot] :: ReadOptions -> !Maybe Snapshot

-- | Options for write operations
data WriteOptions :: *
WriteOptions :: !Bool -> WriteOptions

-- | If true, the write will be flushed from the operating system buffer
--   cache (by calling WritableFile::Sync()) before the write is considered
--   complete. If this flag is true, writes will be slower.
--   
--   If this flag is false, and the machine crashes, some recent writes may
--   be lost. Note that if it is just the process that crashes (i.e., the
--   machine does not reboot), no writes will be lost even if sync==false.
--   
--   In other words, a DB write with sync==false has similar crash
--   semantics as the "write()" system call. A DB write with sync==true has
--   similar crash semantics to a "write()" system call followed by
--   "fsync()".
--   
--   Default: False
[sync] :: WriteOptions -> !Bool
type RWOptions = (ReadOptions, WriteOptions)
type WriteBatch = [BatchOp]

-- | The default value for this type.
def :: Default a => a

-- | A class for monads in which exceptions may be thrown.
--   
--   Instances should obey the following law:
--   
--   <pre>
--   throwM e &gt;&gt; x = throwM e
--   </pre>
--   
--   In other words, throwing an exception short-circuits the rest of the
--   monadic computation.
class Monad m => MonadThrow (m :: * -> *)

-- | Monads which allow their actions to be run in <a>IO</a>.
--   
--   While <a>MonadIO</a> allows an <a>IO</a> action to be lifted into
--   another monad, this class captures the opposite concept: allowing you
--   to capture the monadic context. Note that, in order to meet the laws
--   given below, the intuition is that a monad must have no monadic state,
--   but may have monadic context. This essentially limits
--   <a>MonadUnliftIO</a> to <a>ReaderT</a> and <a>IdentityT</a>
--   transformers on top of <a>IO</a>.
--   
--   Laws. For any value <tt>u</tt> returned by <a>askUnliftIO</a>, it must
--   meet the monad transformer laws as reformulated for
--   <tt>MonadUnliftIO</tt>:
--   
--   <ul>
--   <li><pre>unliftIO u . return = return</pre></li>
--   <li><pre>unliftIO u (m &gt;&gt;= f) = unliftIO u m &gt;&gt;= unliftIO
--   u . f</pre></li>
--   </ul>
--   
--   The third is a currently nameless law which ensures that the current
--   context is preserved.
--   
--   <ul>
--   <li><pre>askUnliftIO &gt;&gt;= (\u -&gt; liftIO (unliftIO u m)) =
--   m</pre></li>
--   </ul>
--   
--   If you have a name for this, please submit it in a pull request for
--   great glory.
class MonadIO m => MonadUnliftIO (m :: * -> *)
instance Control.Monad.Catch.MonadThrow m => Control.Monad.Catch.MonadThrow (Database.LevelDB.Higher.LevelDBT m)
instance Control.Monad.IO.Class.MonadIO m => Control.Monad.IO.Class.MonadIO (Database.LevelDB.Higher.LevelDBT m)
instance GHC.Base.Monad m => GHC.Base.Monad (Database.LevelDB.Higher.LevelDBT m)
instance GHC.Base.Applicative m => GHC.Base.Applicative (Database.LevelDB.Higher.LevelDBT m)
instance GHC.Base.Functor m => GHC.Base.Functor (Database.LevelDB.Higher.LevelDBT m)
instance (Control.Monad.Catch.MonadThrow m, Control.Monad.IO.Unlift.MonadUnliftIO m) => Database.LevelDB.Higher.MonadLevelDB (Database.LevelDB.Higher.LevelDBT m)
instance (GHC.Base.Monad m, Database.LevelDB.Higher.MonadLevelDB m) => Database.LevelDB.Higher.MonadLevelDB (Control.Monad.Trans.Reader.ReaderT r m)
instance (GHC.Base.Monad m, Database.LevelDB.Higher.MonadLevelDB m) => Database.LevelDB.Higher.MonadLevelDB (Control.Monad.Trans.Maybe.MaybeT m)
instance (GHC.Base.Monad m, Database.LevelDB.Higher.MonadLevelDB m) => Database.LevelDB.Higher.MonadLevelDB (Control.Monad.Trans.Identity.IdentityT m)
instance (GHC.Base.Monad m, Database.LevelDB.Higher.MonadLevelDB m) => Database.LevelDB.Higher.MonadLevelDB (Control.Monad.Trans.List.ListT m)
instance (GHC.Base.Monad m, Database.LevelDB.Higher.MonadLevelDB m) => Database.LevelDB.Higher.MonadLevelDB (Control.Monad.Trans.Cont.ContT r m)
instance (GHC.Base.Monad m, Database.LevelDB.Higher.MonadLevelDB m) => Database.LevelDB.Higher.MonadLevelDB (Control.Monad.Trans.State.Lazy.StateT s m)
instance (GHC.Base.Monad m, Database.LevelDB.Higher.MonadLevelDB m) => Database.LevelDB.Higher.MonadLevelDB (Control.Monad.Trans.State.Strict.StateT s m)
instance (GHC.Base.Monoid w, Database.LevelDB.Higher.MonadLevelDB m) => Database.LevelDB.Higher.MonadLevelDB (Control.Monad.Trans.Writer.Lazy.WriterT w m)
instance (GHC.Base.Monoid w, Database.LevelDB.Higher.MonadLevelDB m) => Database.LevelDB.Higher.MonadLevelDB (Control.Monad.Trans.Writer.Strict.WriterT w m)
instance (GHC.Base.Monoid w, Database.LevelDB.Higher.MonadLevelDB m) => Database.LevelDB.Higher.MonadLevelDB (Control.Monad.Trans.RWS.Lazy.RWST r w s m)
instance (GHC.Base.Monoid w, Database.LevelDB.Higher.MonadLevelDB m) => Database.LevelDB.Higher.MonadLevelDB (Control.Monad.Trans.RWS.Strict.RWST r w s m)
instance (GHC.Base.Monad m, Database.LevelDB.Higher.MonadLevelDB m) => Database.LevelDB.Higher.MonadLevelDB (Control.Monad.Trans.Except.ExceptT e m)
instance Control.Monad.Base.MonadBase b m => Control.Monad.Base.MonadBase b (Database.LevelDB.Higher.LevelDBT m)
instance Control.Monad.Trans.Class.MonadTrans Database.LevelDB.Higher.LevelDBT
instance Control.Monad.IO.Unlift.MonadUnliftIO m => Control.Monad.Trans.Resource.Internal.MonadResource (Database.LevelDB.Higher.LevelDBT m)
instance Control.Monad.Catch.MonadCatch m => Control.Monad.Catch.MonadCatch (Database.LevelDB.Higher.LevelDBT m)
instance Control.Monad.Catch.MonadMask m => Control.Monad.Catch.MonadMask (Database.LevelDB.Higher.LevelDBT m)
instance Control.Monad.IO.Unlift.MonadUnliftIO m => Control.Monad.IO.Unlift.MonadUnliftIO (Database.LevelDB.Higher.LevelDBT m)
instance GHC.Show.Show Database.LevelDB.Higher.DBContext
