diff --git a/glean.cabal.in b/glean.cabal.in index d2d7c0d85..66ff224b8 100644 --- a/glean.cabal.in +++ b/glean.cabal.in @@ -93,6 +93,10 @@ flag opt flag benchmarks default: False +-- Support for backups to Amazon S3 +flag s3-support + default: True + -- run tests that require clang flag clang-tests default: True @@ -666,6 +670,30 @@ library db glean:stubs, glean:rocksdb, +library db-backup-s3 + import: fb-haskell, deps + visibility: private + hs-source-dirs: glean/db/backup/s3 + + exposed-modules: Glean.Database.Backup.S3 + build-depends: + glean:util, + glean:if-internal-hs, + glean:if-glean-hs, + glean:db, + amazonka, + amazonka-s3, + conduit, + unliftio, + microlens, + http-client, + + if flag(s3-support) + buildable: True + else + buildable: False + + -- Backend API, and a few things built on top library backend-api import: fb-haskell, fb-cpp, deps @@ -712,6 +740,7 @@ library backend-local Glean.Backend.Local Glean.Backend.Logging Glean.Dump + build-depends: glean:angle, glean:backend-api, @@ -1138,6 +1167,10 @@ executable glean-server haskeline >=0.7.3 && <0.9, json + if flag(s3-support) + cpp-options: -DENABLE_S3=1 + build-depends: glean:db-backup-s3 + library shell-lib import: fb-haskell, fb-cpp, deps hs-source-dirs: glean/shell @@ -1266,6 +1299,10 @@ executable glean split, Glob, + if flag(s3-support) + cpp-options: -DENABLE_S3=1 + build-depends: glean:db-backup-s3 + executable glean-hyperlink import: fb-haskell, fb-cpp, deps, exe hs-source-dirs: glean/demo @@ -2160,6 +2197,27 @@ test-suite backup glean:stubs, glean:util +test-suite backup-s3 + import: test + type: exitcode-stdio-1.0 + main-is: BackupTestS3.hs + ghc-options: -main-is BackupTestS3 + build-depends: + glean:config, + glean:core, + glean:db, + glean:if-glean-hs, + glean:if-internal-hs, + glean:schema, + glean:stubs, + glean:util, + glean:db-backup-s3 + + if flag(s3-support) + buildable: True + else + buildable: False + test-suite catalog import: test type: exitcode-stdio-1.0 diff --git a/glean/db/backup/s3/Glean/Database/Backup/S3.hs b/glean/db/backup/s3/Glean/Database/Backup/S3.hs new file mode 100644 index 000000000..7f4ab1d97 --- /dev/null +++ b/glean/db/backup/s3/Glean/Database/Backup/S3.hs @@ -0,0 +1,320 @@ +{- + Copyright (c) Meta Platforms, Inc. and affiliates. + All rights reserved. + + This source code is licensed under the BSD-style license found in the + LICENSE file in the root directory of this source tree. +-} +{-# LANGUAGE TypeApplications #-} + +{- | S3 backup backend for Glean. + +Requirements: +- S3 bucket in a region which supports metadata (available in the [largest regions](https://docs.aws.amazon.com/AmazonS3/latest/userguide/metadata-tables-restrictions.html#metadata-tables-regions)). 
+- AWS credential discovery configured on the machine ([See Amazonka docs](https://hackage.haskell.org/package/amazonka-2.0/docs/Amazonka-Auth.html#v:discover)) + +Usage example: + +@ +glean --tier test --db-root ~/glean backup myrepo/0 s3:mybucket/my-dir +glean --tier test --db-root ~/glean2 restore s3:mybucket/my-dir/myrepo.0 +@ +-} +module Glean.Database.Backup.S3 (withS3Backups, fakeS3Backend) where + +import Control.Arrow (Arrow (..)) +import Control.Exception.Safe (throwIO) +import Control.Monad ((<=<), (>=>)) +import Data.ByteString (ByteString) +import qualified Data.ByteString as BS +import qualified Data.ByteString.Builder as BB +import qualified Data.ByteString.Lazy as LBS +import Data.Functor (void) +import qualified Data.HashMap.Strict as HashMap +import qualified Data.Map as Map +import Data.Maybe (catMaybes) +import qualified Data.Set as Set +import Data.Text (Text) +import qualified Data.Text as Text +import qualified Data.Text.Encoding as Text +import qualified Data.Text.Encoding.Error as Text +import qualified Network.HTTP.Client as HTTP +import UnliftIO (IORef, atomicModifyIORef', newIORef, readIORef, writeIORef) +import UnliftIO.Async (forConcurrently) +import UnliftIO.Exception.Lens + +import qualified Amazonka as AWS +import qualified Amazonka.S3 as S3 +import Amazonka.S3.DeleteObject +import Amazonka.S3.GetObject +import Amazonka.S3.HeadObject +import Amazonka.S3.ListObjectsV2 +import Amazonka.S3.PutObject +import Amazonka.S3.Types.Object +import Conduit +import Lens.Micro +import Lens.Micro.Extras + +import Glean.Database.Backup.Backend +import qualified Glean.Database.Config as DBConfig +import Glean.Database.Exception +import Glean.Internal.Types (Meta (..)) +import Glean.Types (Repo (..)) +import qualified Glean.Types as Thrift hiding (Exception) +import Glean.Util.Some +import qualified Thrift.Protocol.JSON as Thrift +import Util.Concurrent (cacheSuccess) +import Util.Log.Text + +withS3Backups :: DBConfig.Config -> IO DBConfig.Config +withS3Backups cfg@DBConfig.Config{..} = do + s3AwsEnvLazy <- cacheSuccess $ withLogging <$> AWS.newEnv AWS.discover + pure cfg{DBConfig.cfgBackupBackends = cfgBackupBackends <> HashMap.fromList [("s3", Some (genuineS3Backend s3AwsEnvLazy))]} + +withLogging :: AWS.Env' withAuth -> AWS.Env' withAuth +withLogging env = env{AWS.logger = \lvl -> ourLogger lvl . convertString} + where + convertString = Text.decodeUtf8With Text.lenientDecode . BS.toStrict . BB.toLazyByteString + + ourLogger AWS.Error msg = logError msg + ourLogger AWS.Info msg = logInfo msg + -- These are really spammy (especially at trace) so make them -v 2 only. + ourLogger AWS.Debug msg = vlog 2 msg + ourLogger AWS.Trace msg = vlog 3 msg + +-- | S3 backup backend, which auto discovers its environment configuration. +data S3Backend = S3Backend {s3BucketFactory :: (Text -> Some S3BucketApi)} + +-- | Creates a real S3 based backend. +genuineS3Backend :: IO AWS.Env -> S3Backend +genuineS3Backend awsEnv = newS3Backend (\name -> GenuineS3Bucket awsEnv (S3.BucketName name)) + +newS3Backend :: (S3BucketApi bucket) => (Text -> bucket) -> S3Backend +newS3Backend factory = S3Backend{s3BucketFactory = \name -> Some (factory name)} + +-- | Creates a new fake in-memory S3 backend. 
+fakeS3Backend :: (MonadIO m) => m S3Backend +fakeS3Backend = do + fakeFiles <- newIORef Map.empty + pure $ newS3Backend (\name -> FakeS3Bucket{fakeBucketName = name, fakeFiles}) + +instance Backend S3Backend where + fromPath S3Backend{s3BucketFactory} path = do + let (bucketName, bucketBasePath') = Text.breakOn "/" path + (_, bucketBasePath) <- Text.uncons bucketBasePath' + Just . Some $ S3Site{s3Client = s3BucketFactory bucketName, bucketBasePath} + +-- | File in an S3 bucket. +data Item = Item + { itemPath :: Text + } + +type Metadata = HashMap.HashMap Text Text + +-- | A client to an S3 bucket, as a seam for unit testing purposes. +class S3BucketApi a where + -- | Gets the name of the bucket this is a client for. + bucketName :: a -> Text + + {- | Lists items with a prefix. + + We aren't using the delimiter functionality (which does dedupe by common + prefixes) as we need both the meta and the file itself to exist for + consistency, so we need to catch both of those. + + This also doesn't concern itself with pagination because our consuming API + doesn't either. + -} + listItemsWithPrefix :: (MonadResource m, MonadUnliftIO m) => a -> Text -> m [Item] + + -- | Uploads a file on disk to the given path in the bucket. + uploadFile :: (MonadResource m, MonadUnliftIO m) => a -> Text -> Metadata -> AWS.RequestBody -> m () + + {- | Downloads a file from S3. + + Throws for any error but absent key. + -} + downloadFile :: (MonadResource m, MonadUnliftIO m) => a -> Text -> m (Maybe (Metadata, AWS.ResponseBody)) + + {- | Checks a file exists on S3. + + Throws for any error but absent key. + -} + headFile :: (MonadResource m, MonadUnliftIO m) => a -> Text -> m (Maybe Metadata) + + -- | Deletes a file in the bucket. + deleteFile :: (MonadResource m, MonadUnliftIO m) => a -> Text -> m () + +instance S3BucketApi (Some S3BucketApi) where + bucketName (Some bucket) = bucketName bucket + listItemsWithPrefix (Some bucket) = listItemsWithPrefix bucket + uploadFile (Some bucket) = uploadFile bucket + downloadFile (Some bucket) = downloadFile bucket + headFile (Some bucket) = headFile bucket + deleteFile (Some bucket) = deleteFile bucket + +data GenuineS3Bucket = GenuineS3Bucket + { awsEnvLazy :: IO AWS.Env + , s3BucketName :: S3.BucketName + } + +instance S3BucketApi GenuineS3Bucket where + bucketName = view S3._BucketName . s3BucketName + + listItemsWithPrefix GenuineS3Bucket{awsEnvLazy, s3BucketName} prefix = do + awsEnv <- liftIO awsEnvLazy + let request = + newListObjectsV2 s3BucketName + & listObjectsV2_prefix + ?~ prefix + runConduit $ + AWS.paginate awsEnv request + .| concatMapC (^. listObjectsV2Response_contents) + .| concatC + .| mapC (\obj -> Item{itemPath = obj ^. object_key . S3._ObjectKey}) + .| sinkList + + uploadFile GenuineS3Bucket{awsEnvLazy, s3BucketName} name metadata body = do + awsEnv <- liftIO awsEnvLazy + let req = + newPutObject s3BucketName (S3.ObjectKey name) body + & (putObject_metadata .~ metadata) + void . AWS.send awsEnv $ req + + headFile GenuineS3Bucket{awsEnvLazy, s3BucketName} name = do + awsEnv <- liftIO awsEnvLazy + let req = newHeadObject s3BucketName (S3.ObjectKey name) + handling_ S3._NoSuchKey (pure Nothing) $ + Just . view headObjectResponse_metadata <$> AWS.send awsEnv req + + downloadFile GenuineS3Bucket{awsEnvLazy, s3BucketName} name = do + awsEnv <- liftIO awsEnvLazy + let req = newGetObject s3BucketName (S3.ObjectKey name) + handling_ S3._NoSuchKey (pure Nothing) $ + Just . 
(view getObjectResponse_metadata &&& view getObjectResponse_body) + <$> AWS.send awsEnv req + + deleteFile GenuineS3Bucket{awsEnvLazy, s3BucketName} name = do + awsEnv <- liftIO awsEnvLazy + let req = newDeleteObject s3BucketName (S3.ObjectKey name) + void $ AWS.send awsEnv req + +data FakeS3Bucket = FakeS3Bucket + { fakeFiles :: IORef (Map.Map Text (Metadata, ByteString)) + , fakeBucketName :: Text + } + +-- FIXME(jadel): upstream this to http-client +materializeRequestBody :: HTTP.RequestBody -> IO LBS.ByteString +materializeRequestBody = \case + HTTP.RequestBodyLBS lbs -> pure lbs + HTTP.RequestBodyBS bs -> pure (BS.fromStrict bs) + HTTP.RequestBodyBuilder _ b -> pure (BB.toLazyByteString b) + HTTP.RequestBodyStream _ g -> withPopper g + HTTP.RequestBodyStreamChunked g -> withPopper g + HTTP.RequestBodyIO io -> materializeRequestBody =<< io + where + withPopper :: HTTP.GivesPopper () -> IO LBS.ByteString + withPopper giver = do + r <- newIORef [] + giver $ do writeIORef r <=< getChunks + LBS.fromChunks <$> readIORef r + + getChunks :: IO ByteString -> IO [ByteString] + getChunks io = + io >>= \case + "" -> pure [] + chunk -> (chunk :) <$> getChunks io + +instance S3BucketApi FakeS3Bucket where + bucketName = fakeBucketName + + listItemsWithPrefix FakeS3Bucket{fakeFiles} prefix = do + fmap Item . filter (prefix `Text.isPrefixOf`) . Map.keys <$> readIORef fakeFiles + + uploadFile FakeS3Bucket{fakeFiles} name metadata body = do + body <- liftIO . materializeRequestBody $ AWS.toRequestBody body + atomicModifyIORef' fakeFiles (\files -> (files <> Map.singleton name (metadata, BS.toStrict body), ())) + + headFile FakeS3Bucket{fakeFiles} name = do + fmap fst . Map.lookup name <$> readIORef fakeFiles + + downloadFile FakeS3Bucket{fakeFiles} name = do + fmap (\(meta, body) -> (meta, AWS.ResponseBody $ yield body)) . Map.lookup name <$> readIORef fakeFiles + + deleteFile FakeS3Bucket{fakeFiles} name = do + atomicModifyIORef' fakeFiles ((,()) . (`Map.withoutKeys` (Set.singleton name))) + +data S3Site = S3Site + { s3Client :: Some S3BucketApi + , bucketBasePath :: Text + } + +makeRepoPath :: Text -> Repo -> Text +makeRepoPath bucketBasePath Repo{repo_name, repo_hash} = Text.intercalate "/" [bucketBasePath, repo_name, repo_hash] + +dbPath :: Text -> Text +dbPath = (<> ".tar.gz") + +metadataKey :: Text +metadataKey = "glean-metadata" + +parseMeta :: (MonadIO m, MonadThrow m) => Repo -> Text -> m Meta +parseMeta repo = either (dbError repo) pure . Thrift.deserializeJSON . Text.encodeUtf8 + +instance Site S3Site where + backup S3Site{s3Client, bucketBasePath} repo meta _ttl fromPath = runResourceT $ do + let repoPath = makeRepoPath bucketBasePath repo + body <- AWS.chunkedFile AWS.defaultChunkSize fromPath + + -- https://docs.aws.amazon.com/AmazonS3/latest/userguide/UsingMetadata.html#UserMetadata + -- Arbitrary printable Unicode characters only means that any binary + -- encoding would force us to base64 it, which seems like a hassle and + -- maybe not even more compact. 
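+    -- The Meta is serialized as JSON and attached to the uploaded object as
+    -- user metadata under the "glean-metadata" key, while the DB image
+    -- itself lands at "<base>/<repo_name>/<repo_hash>.tar.gz" (see
+    -- makeRepoPath/dbPath), so 'inspect' can recover the Meta with a HEAD
+    -- request without downloading the DB.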
+ let meta' = HashMap.singleton metadataKey (Text.decodeUtf8 $ Thrift.serializeJSON meta) + _ <- uploadFile s3Client (dbPath repoPath) meta' body + pure $ Data (fromIntegral $ AWS.contentLength body) + + delete S3Site{s3Client, bucketBasePath} repo = runResourceT $ do + let repoPath = makeRepoPath bucketBasePath repo + deleteFile s3Client (dbPath repoPath) + + restore S3Site{s3Client, bucketBasePath} repo intoPath = runResourceT $ do + let repoPath = makeRepoPath bucketBasePath repo + res <- downloadFile s3Client (dbPath repoPath) + case res of + Just (meta, repoStream) + | Just metaJson <- HashMap.lookup metadataKey meta -> do + meta <- parseMeta repo metaJson + runConduit $ AWS.sinkBody repoStream (sinkFile intoPath) + pure meta + _ -> throwIO . Thrift.InvalidLocator $ "locator is missing either metadata or db.tar.gz" <> (Text.pack . show) repo + + inspect S3Site{s3Client, bucketBasePath} repo = runResourceT $ do + let repoPath = makeRepoPath bucketBasePath repo + res <- headFile s3Client (dbPath repoPath) + + case res of + Just meta + | Just metaJson <- HashMap.lookup metadataKey meta -> + parseMeta repo metaJson + _ -> throwIO . Thrift.InvalidLocator $ "locator is missing either metadata or db.tar.gz: " <> (Text.pack . show) repo + + enumerate site@S3Site{s3Client, bucketBasePath} = runResourceT $ do + items <- listItemsWithPrefix s3Client bucketBasePath + let parsed = catMaybes $ map parseItemFilename items + forConcurrently parsed $ \repo -> (repo,) <$> liftIO (inspect site repo) + where + parseItemFilename = + (Text.stripPrefix (bucketBasePath <> "/") . itemPath) + >=> Text.stripSuffix ".tar.gz" + >=> splitFilename + -- >>> splitFilename "myrepo/123" + -- Just (Repo "myrepo" "123") + splitFilename name + | let (withTrailingSlash, repo_hash) = Text.breakOnEnd "/" name + , Just (repo_name, _slash) <- Text.unsnoc withTrailingSlash = + Just Repo{repo_name, repo_hash} + splitFilename _name = Nothing + + toPath S3Site{s3Client, bucketBasePath} = bucketName s3Client <> "/" <> bucketBasePath diff --git a/glean/server/Glean/Server.hs b/glean/server/Glean/Server.hs index ffd48f1c2..b2975e0d0 100644 --- a/glean/server/Glean/Server.hs +++ b/glean/server/Glean/Server.hs @@ -59,34 +59,39 @@ import Glean.Server.Sharding ( waitForTerminateSignalsAndGracefulShutdown) import Glean.Util.ConfigProvider +#if ENABLE_S3 +import qualified Glean.Database.Backup.S3 as S3 +#endif + main :: IO () main = withConfigOptions (O.info options O.fullDesc) $ \(cfg0, cfgOpts) -> withEventBaseDataplane $ \evb -> - withConfigProvider cfgOpts $ \(configAPI :: ConfigAPI) -> + withConfigProvider cfgOpts $ \(configAPI :: ConfigAPI) -> do #if GLEAN_FACEBOOK withLogger configAPI $ \logger -> withTracing $ \tracer -> - withAvailableDBFilterViaSR evb $ \filterAvailableDBs -> + withAvailableDBFilterViaSR evb $ \filterAvailableDBs -> do #endif - let dbCfg = (cfgDBConfig cfg0){ - cfgShardManager = shardManagerConfig (cfgPort cfg) + let + dbCfg = (cfgDBConfig cfg0){ + cfgShardManager = shardManagerConfig (cfgPort cfg0) #if GLEAN_FACEBOOK - , cfgServerLogger = Some (GleanServerFacebookLogger logger) - , cfgDatabaseLogger = Some (GleanDatabaseFacebookLogger logger) - , cfgBatchLocationParser = Some (FacebookBatchLocationParser) - , cfgFilterAvailableDBs = filterAvailableDBs - , cfgTracer = tracer + , cfgServerLogger = Some (GleanServerFacebookLogger logger) + , cfgDatabaseLogger = Some (GleanDatabaseFacebookLogger logger) + , cfgBatchLocationParser = Some (FacebookBatchLocationParser) + , cfgFilterAvailableDBs = filterAvailableDBs + , 
cfgTracer = tracer #endif } - #if GLEAN_FACEBOOK - cfg = cfg0{cfgDBConfig = XDB.withXdbCatalog "manifold" $ + let cfg = cfg0{cfgDBConfig = XDB.withXdbCatalog "manifold" $ Manifold.withManifoldBackups evb dbCfg} +#elif ENABLE_S3 + cfg <- (\dbCfg' -> cfg0{cfgDBConfig = dbCfg'}) <$> S3.withS3Backups dbCfg #else - cfg = cfg0{cfgDBConfig = dbCfg} + let cfg = cfg0{cfgDBConfig = dbCfg} #endif - in withDatabases evb (cfgDBConfig cfg) configAPI $ \databases -> do terminating <- newTVarIO False withShardsUpdater evb cfg databases (1 :: Seconds) (readTVar terminating) $ do diff --git a/glean/test/tests/BackupTestS3.hs b/glean/test/tests/BackupTestS3.hs new file mode 100644 index 000000000..9ed4982de --- /dev/null +++ b/glean/test/tests/BackupTestS3.hs @@ -0,0 +1,94 @@ +{- + Copyright (c) Meta Platforms, Inc. and affiliates. + All rights reserved. + + This source code is licensed under the BSD-style license found in the + LICENSE file in the root directory of this source tree. +-} +{-# OPTIONS_GHC -Wno-incomplete-uni-patterns #-} + +module BackupTestS3 (main) where + +import Control.Monad +import Control.Monad.IO.Class (MonadIO (..)) +import qualified Data.HashMap.Strict as HashMap +import Data.Time (fromGregorian) +import Data.Time.Clock +import System.IO (hClose) +import qualified System.IO.Temp as Temp +import Test.HUnit + +import TestRunner + +import qualified Glean.Database.Backup.Backend as Backup +import Glean.Database.Backup.S3 +import Glean.Database.Meta +import Glean.Init +import Glean.Internal.Types (Completeness (Broken), StorageName (..)) +import Glean.ServerConfig.Types as ServerTypes +import Glean.Types as Thrift +import Glean.Util.IO (withTempFileContents) +import Glean.Util.Some + +meta :: Meta +meta = + newMeta + (StorageName "rocksdb") + (DBVersion 3) + (DBTimestamp (UTCTime (fromGregorian 1 1 1) 0) Nothing) + (Broken (DatabaseBroken "test" "test")) + (HashMap.fromList [("a", "b")]) + Nothing + +testDbRepo :: Repo +testDbRepo = Repo "foo" "123" + +testSite :: (MonadIO m) => m (Some Backup.Site) +testSite = do + backend <- fakeS3Backend + let Just site = Backup.fromPath backend "s3:testbucket/base/base2" + pure site + +restoresTest :: Test +restoresTest = TestCase $ do + site <- testSite + + withTempFileContents ("abcd" :: String) $ \path -> do + void $ Backup.backup site testDbRepo meta Nothing path + + Temp.withSystemTempFile "glean" $ \path h -> do + hClose h + meta' <- Backup.restore site testDbRepo path + assertEqual "restored database meta is the same" meta' meta + content <- readFile path + assertEqual "restored database content is the same" content "abcd" + +metadatasTest :: Test +metadatasTest = TestCase $ do + site <- testSite + + dbs <- Backup.enumerate site + assertEqual "no databases" dbs [] + + withTempFileContents ("abcd" :: String) $ \path -> do + void $ Backup.backup site testDbRepo meta Nothing path + + db <- Backup.inspect site testDbRepo + assertEqual "database inspectable" db meta + + dbs' <- Backup.enumerate site + assertEqual "databases exists after" dbs' [(testDbRepo, meta)] + + Backup.delete site testDbRepo + + dbs'' <- Backup.enumerate site + assertEqual "no databases after deleting it" dbs'' [] + +main :: IO () +main = + withUnitTest $ + testRunner $ + TestList + [ TestLabel "restores" restoresTest + , TestLabel "metadata" metadatasTest + ] diff --git a/glean/tools/gleancli/GleanCLI.hs b/glean/tools/gleancli/GleanCLI.hs index 3259508cc..aae680282 100644 --- a/glean/tools/gleancli/GleanCLI.hs +++ b/glean/tools/gleancli/GleanCLI.hs @@ -52,6 +52,10 @@ import 
Glean.Util.ConfigProvider import Glean.Util.ShellPrint import Glean.Shell +#if ENABLE_S3 +import qualified Glean.Database.Backup.S3 as S3 +#endif + import GleanCLI.Backup import GleanCLI.Common import GleanCLI.Complete @@ -157,20 +161,26 @@ main = withEventBaseDataplane $ \evb -> withConfigProvider cfgOpts $ \cfgAPI -> case cfgCommand of - PluginCommand c -> - withService evb cfgAPI - (liftServerConfig (serverConfigTransform c) $ - withRemoteBackups evb cfgService) - c - -withRemoteBackups :: EventBaseDataplane -> Glean.Service -> Glean.Service -withRemoteBackups _evb = liftConfig + PluginCommand c -> do + cfgService' <- withRemoteBackups evb cfgService + withService evb cfgAPI (liftServerConfig (serverConfigTransform c) cfgService') c + +withRemoteBackups :: EventBaseDataplane -> Glean.Service -> IO Glean.Service +withRemoteBackups _evb = liftConfigIO $ #if GLEAN_FACEBOOK - (XDB.withXdbCatalog "manifold" . Manifold.withManifoldBackups _evb) + pure . (XDB.withXdbCatalog "manifold" . Manifold.withManifoldBackups _evb) +#elif ENABLE_S3 + S3.withS3Backups #else - id + pure #endif +liftConfigIO + :: (GleanDB.Config -> IO GleanDB.Config) + -> Glean.Service -> IO Glean.Service +liftConfigIO f (Glean.Local cfg log) = fmap (\cfg' -> Glean.Local cfg' log) $ f cfg +liftConfigIO _ other@Glean.Remote{} = pure other + liftConfig :: (GleanDB.Config -> GleanDB.Config) -> Glean.Service -> Glean.Service diff --git a/glean/website/docs/databases.md b/glean/website/docs/databases.md index 0f8c443bc..5907f596b 100644 --- a/glean/website/docs/databases.md +++ b/glean/website/docs/databases.md @@ -79,7 +79,8 @@ are supported in this state, and always reflect the current contents. :::note -There are currently no backup backends implemented for open-source Glean. +There is currently one backup backend supported in open source Glean: Amazon S3. +See the `Glean.Database.Backup.S3` module for details. :::
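+
+For example (mirroring the usage example in the `Glean.Database.Backup.S3`
+Haddocks), with the `s3-support` Cabal flag enabled (the default) and AWS
+credentials discoverable on the host, a database can be backed up to and
+restored from a bucket via `s3:<bucket>/<prefix>` locators:
+
+```
+glean --tier test --db-root ~/glean backup myrepo/0 s3:mybucket/my-dir
+glean --tier test --db-root ~/glean2 restore s3:mybucket/my-dir/myrepo.0
+```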