From 3f34dc93145e76c1ee156b1ee64df119d9e4f76b Mon Sep 17 00:00:00 2001 From: Joerg Steffens Date: Mon, 9 Jul 2018 18:03:09 +0200 Subject: [PATCH] doc: Create own chapter for Storage Backends Rework chapter about Droplet Storage Backend. Fixes #949: S3 Droplet backend documentation missing --- .../en/main/bareos-manual-main-reference.tex | 3 + .../bareos-sd-resource-device-definitions.tex | 36 +-- docs/manuals/en/main/bareos.sty | 4 + .../en/main/plugins-droplet-plugin.tex | 155 ---------- docs/manuals/en/main/plugins.tex | 3 - .../en/main/storage-backend-droplet.tex | 271 ++++++++++++++++++ docs/manuals/en/main/storage-backends.tex | 55 ++++ 7 files changed, 346 insertions(+), 181 deletions(-) delete mode 100644 docs/manuals/en/main/plugins-droplet-plugin.tex create mode 100644 docs/manuals/en/main/storage-backend-droplet.tex create mode 100644 docs/manuals/en/main/storage-backends.tex diff --git a/docs/manuals/en/main/bareos-manual-main-reference.tex b/docs/manuals/en/main/bareos-manual-main-reference.tex index 9f25ea4d778..b21ee8afb26 100644 --- a/docs/manuals/en/main/bareos-manual-main-reference.tex +++ b/docs/manuals/en/main/bareos-manual-main-reference.tex @@ -192,6 +192,9 @@ \chapter{Volume Management} %tape \include{tape-without-autochanger} +\include{storage-backends} + + \include{spooling} \include{migration} \include{always-incremental} diff --git a/docs/manuals/en/main/bareos-sd-resource-device-definitions.tex b/docs/manuals/en/main/bareos-sd-resource-device-definitions.tex index 4aa69db775f..03e0a4a630c 100644 --- a/docs/manuals/en/main/bareos-sd-resource-device-definitions.tex +++ b/docs/manuals/en/main/bareos-sd-resource-device-definitions.tex @@ -322,27 +322,9 @@ Some \linkResourceDirective{Sd}{Device}{Device Type} require additional configuration. This can be specified in this directive, e.g. for \begin{description} - \item [\sdBackend{GFAPI}{GlusterFS}] - A GlusterFS Storage can be used as Storage backend of Bareos. - Prerequistes are a working GlusterFS storage system and the package \package{bareos-storage-glusterfs}. - See \url{http://www.gluster.org/} for more information regarding GlusterFS installation and configuration - and specifically \url{https://gluster.readthedocs.org/en/latest/Administrator Guide/Bareos/} - for Bareos integration. - You can use following snippet to configure it as storage device: - \bconfigInput{config/SdDeviceDeviceOptionsGfapi1.conf} - Adapt server and volume name to your environment. - - \sinceVersion{sd}{GlusterFS Storage}{15.2.0} - - \item [\sdBackend{Rados}{Ceph Object Store}] - Here you configure the Ceph object store, which is accessed by the SD using the Rados library. Prerequistes are a - working Ceph object store and the package \package{bareos-storage-ceph}. See \url{http://ceph.com} for more information regarding Ceph installation and configuration. - Assuming that you have an object store with name \file{poolname} - and your Ceph access is configured in \file{/etc/ceph/ceph.conf}, - you can use following snippet to configure it as storage device: - \bconfigInput{config/SdDeviceDeviceOptionsRados1.conf} - - \sinceVersion{sd}{Ceph Storage}{15.2.0} + \item [\nameref{SdBackendDroplet}] + \item [\nameref{SdBackendGfapi}] + \item [\nameref{SdBackendRados}] \end{description} Before the Device Options directive have been introduced, @@ -363,12 +345,20 @@ USB. All files must be random access devices. \item [\sdBackend{Fifo}{}] is a first-in-first-out sequential access read-only or write-only device. 
-\item [\sdBackend{GFAPI}{GlusterFS}] \label{SdBackendGfapi} is used to access a GlusterFS storage. +\item [\sdBackend{GFAPI}{GlusterFS}] is used to access a GlusterFS storage. It must be configured using \linkResourceDirective{Sd}{Device}{Device Options}. + For details, refer to \nameref{SdBackendGfapi}. + \sinceVersion{sd}{GlusterFS (gfapi)}{14.2.2} -\item [\sdBackend{Rados}{Ceph Object Store}] \label{SdBackendRados} is used to access a Ceph object store. +\item [\sdBackend{Rados}{Ceph Object Store}] is used to access a Ceph object store. It must be configured using \linkResourceDirective{Sd}{Device}{Device Options}. + For details, refer to \nameref{SdBackendRados}. + \sinceVersion{sd}{Ceph (Rados)}{14.2.2} +\item [\sdBackend{Droplet}{}] is used to access an object store supported by \package{libdroplet}, most notably S3. + For details, refer to \nameref{SdBackendDroplet}. + + \sinceVersion{sd}{Droplet}{17.2.7} \end{description} The Device Type directive is not required in all cases. diff --git a/docs/manuals/en/main/bareos.sty b/docs/manuals/en/main/bareos.sty index 7b88be5ca2c..cf341562190 100644 --- a/docs/manuals/en/main/bareos.sty +++ b/docs/manuals/en/main/bareos.sty @@ -180,6 +180,10 @@ \elink{Bareos Regression Testing Base Configuration}{https://github.com/bareos/bareos-regress/tree/master/configs/BASE/}\xspace% } +\newcommand{\externalReferenceDroplet}{% +\url{https://github.com/scality/Droplet}\xspace% +} + \newcommand{\externalReferenceIsilonNdmpEnvironmentVariables}{% \elink{Isilon OneFS 7.2.0 CLI Administration Guide}{https://www.emc.com/collateral/TechnicalDocument/docu56048.pdf}, section \bquote{NDMP environment variables}\xspace% } diff --git a/docs/manuals/en/main/plugins-droplet-plugin.tex b/docs/manuals/en/main/plugins-droplet-plugin.tex deleted file mode 100644 index b85c558aa25..00000000000 --- a/docs/manuals/en/main/plugins-droplet-plugin.tex +++ /dev/null @@ -1,155 +0,0 @@ -\subsection{Droplet plugin} -\label{DropletPlugin} -\index[general]{Plugin!Droplet} -\index[general]{Droplet Plugin} - -The \package{bareos-storage-droplet} plugin can be used to access Object Storage through \package{librdroplet}. - -\subsubsection{Installation} - -Install the package \package{bareos-storage-droplet} including its requirments -by using an appropriate package management tool -(eg. \command{yum}, \command{zypper}). - -\subsubsection{Configuration} -The droplet backend requires a storage ressource, a special device ressource as well as a droplet.profile file where your access- and secret-keys and other parameters for the connection to your object storage are stored. First, we will create the new storage ressource. -Configure the ressource in your \bareosDir storage configuration and save it to \path|/etc/bareos/bareos-dir.d/storage/S3_Object.conf| - -\begin{bconfig}{bareos-dir}{storage}{S3_Object.conf} -Storage { - Name = "S3_Object" # Replace this by the Bareos Storage Daemon Name - Address = "bareos-sd.example.com" # Replace this by the Bareos Storage Daemon FQDN or IP address - Password = "secret" # Replace this by the Bareos Storage Daemon director password - Device = "S3_ObjectStorage" # Mention the new devices name here - Media Type = "S3_Object1" - -} -\end{bconfig} - -As of your \bareosSd daemon's configuration, we need to setup a new device that acts as a link to your bucket. -Name and media type must match those in the director's storage ressource. - -\begin{description} -\item[profile=] - Droplet profile to use either absolute PATH or logical name (e.g. 
/etc/bareos/bareos-sd.d/droplet/droplet.profile). Make sure this is accessible for bareos. -\item[location=] - Optional, but required for AWS Storage (e.g. eu-west-2 etc.) -\item[acl=] - Canned ACL -\item[storageclass=] - Storage Class to use. -\item[bucket=] - Bucket to store objects in. -\item[chunksize=] - Size of Volume Chunks (default = 10 Mb) -\item[iothreads=] - Number of IO-threads to use for uploads (use blocking uploads if not set.) -\item[ioslots=] - Number of IO-slots per IO-thread (default 10). Set this to >= 1 for cached and to 0 for direct writing. -\item[retries=] - Number of writing tries before discarding a job. Set this to 0 for unlimited retries. Setting anything != 0 here will cause dataloss if the backend is not available, so be very careful here. -\item[mmap=] - Use mmap to allocate Chunk memory instead of malloc(). -\end{description} - -A device for the usage of AWS S3 object storage with a bucket named "backup-bareos" located in EU West 2, London, would look like this: -\begin{bconfig}{bareos-sd.d}{device}{AWS_S3_1-00.conf} -Device { - Name = "AWS_S3_1-00" - Media Type = "AWS_S3_File_1" - Archive Device = "AWS S3 Storage" - Device Options = "profile=/etc/bareos/bareos-sd.d/droplet/aws_droplet.profile,bucket=backup-bareos,location=eu-west-2,chunksize=100M" - Device Type = droplet - LabelMedia = yes # Lets Bareos label unlabeled media - Random Access = yes - AutomaticMount = yes # When device opened, read it - RemovableMedia = no - AlwaysOpen = no - Description = "S3 device" - Maximum File Size = 500M # 500 MB (allows for seeking to small portions of the Volume) - Maximum Concurrent Jobs = 1 - Maximum Spool Size = 15000M -} -\end{bconfig} - -A device for CEPH object storage could look like this: -\begin{bconfig}{bareos-sd.d}{device}{CEPH_1-00.conf} -Device { - Name = "CEPH_1-00" - Media Type = "CEPH_File_1" - Archive Device = "Object S3 Storage" - Device Options = "profile=/etc/bareos/bareos-sd.d/droplet/ceph_droplet.profile,bucket=backup-bareos,chunksize=100M" - Device Type = droplet - LabelMedia = yes # Lets Bareos label unlabeled media - Random Access = yes - AutomaticMount = yes # When device opened, read it - RemovableMedia = no - AlwaysOpen = no - Description = "S3 device" - Maximum File Size = 500M # 500 MB (allows for seeking to small portions of the Volume) - Maximum Concurrent Jobs = 1 - Maximum Spool Size = 15000M -} -\end{bconfig} - -Create the profile to be used by the backend, the default path is \path|/etc/bareos-sd.d/droplet/droplet.profile|. -This profile is used later by the droplet library when accessing your cloud storage. An example for AWS S3 could look like this: - -\begin{bconfig}{bareos-sd.d}{droplet}{aws_droplet.conf} -use_https = false # Default is false, if set to true you may use the SSL parameters given in the droplet configuration wiki, see below. -host = s3.amazonaws.com # This parameter is only used as baseurl and will be prepended with bucket and location set in device ressource to form correct url -access_key = myaccesskey -secret_key = mysecretkey -pricing_dir = "" # If not empty, an droplet.csv file will be created which will record all S3 operations. -backend = s3 -aws_auth_sign_version = 4 # While AWS S3 requires this set to 4, use 2 for CEPH S3 Connections. 
-\end{bconfig} - -And for CEPH it would be: -\begin{bconfig}{bareos-sd.d}{droplet}{ceph_droplet.conf} -use_https = false -host = CEPH-host.example.com -access_key = myaccesskey -secret_key = mysecretkey -pricing_dir = "/tmp" -backend = s3 -aws_auth_sign_version = 2 -\end{bconfig} - -More arguments and the SSL parameters (untested) can be found in the documentation of the droplet library: -\url{https://github.com/scality/Droplet/wiki/Configuration-File} - -\subsubsection{Troubleshooting} - -If the S3 backend becomes or is unreachable, the storage daemon will behave depending on \argument{iothreads} and \argument{retries}. -When the storage daemon is using cached writing (\argument{iothreads}>=1) and \argument{retries} is set to zero (unlimited tries), the job will continue running until the backend becomes available again. The job cannot be canceled in this case, as the storage daemon will continuously try to write the cached files. -Great caution should be used when using \argument{retries} > 0 combined with cached writing. If the backend becomes unavailable and the storage daemon reaches the predefined tries, the job will be discarded silently yet marked as "OK" in the \bareosDir. -You can always check the status of the writing process by using \bcommand{status storage=...}. The current writing status will be displayed then: -\begin{bconsole}{status storage} -... -Device "S3_ObjectStorage" (S3) is mounted with: - Volume: Full-0085 - Pool: Full - Media type: S3_Object1 -Backend connection is working. -Inflight chunks: 2 -Pending IO flush requests: - /Full-0085/0002 - 10485760 (try=0) - /Full-0085/0003 - 10485760 (try=0) - /Full-0085/0004 - 10485760 (try=0) -... -Attached Jobs: 175 -... - -\end{bconsole} -\argument{Pending IO flush requests} means that there is data to be written. \argument{try}=0 means that this is the first try and no problem has occurred. If \argument{try}>0, problems occurred and the storage daemon will continue trying. - -Status without pending IO chunks: -\begin{bconsole}{status storage} -... -Device "S3_ObjectStorage" (S3) is mounted with: - Volume: Full-0084 - Pool: Full - Media type: S3_Object1 -Backend connection is working. -No Pending IO flush requests. -Configured device capabilities: - EOF BSR BSF FSR FSF EOM !REM RACCESS AUTOMOUNT LABEL !ANONVOLS !ALWAYSOPEN -Device state: - OPENED !TAPE LABEL !MALLOC APPEND !READ EOT !WEOT !EOF !NEXTVOL !SHORT MOUNTED - num_writers=0 reserves=0 block=8 -Attached Jobs: -... -\end{bconsole} - -If you use AWS S3 object storage and want to debug your non-functional bareos setup, it is recommended to turn on the server access logging in your bucket properties. You will see if bareos gets to try writing into your bucket or not. \ No newline at end of file diff --git a/docs/manuals/en/main/plugins.tex b/docs/manuals/en/main/plugins.tex index 024152e399e..b4f104abaf5 100644 --- a/docs/manuals/en/main/plugins.tex +++ b/docs/manuals/en/main/plugins.tex @@ -445,9 +445,6 @@ \subsection{python-sd Plugin} The \name{python-sd} plugin behaves similar to the \nameref{director-python-plugin}. 
-\subsucetion{bareos-storage-droplet}
-\input{plugins-droplet-plugin}
-
 \section{Director Plugins}
 \label{dirPlugins}
diff --git a/docs/manuals/en/main/storage-backend-droplet.tex b/docs/manuals/en/main/storage-backend-droplet.tex
new file mode 100644
index 00000000000..619e8825016
--- /dev/null
+++ b/docs/manuals/en/main/storage-backend-droplet.tex
@@ -0,0 +1,271 @@
+\section{Droplet Storage Backend}
+\index[sd]{Backend!Droplet}
+\index[sd]{Backend!Droplet!S3}
+\index[sd]{Backend!S3|see {Backend!Droplet}}
+\label{SdBackendDroplet}
+
+The \package{bareos-storage-droplet} backend (\sinceVersion{sd}{Droplet}{17.2.7}) can be used to access Object Storage through \package{libdroplet}.
+Droplet supports a number of backends, most notably S3.
+For details about Droplet itself, see \externalReferenceDroplet.
+
+\subsection{Requirements}
+
+\begin{itemize}
+  \item The Bareos package \package{bareos-storage-droplet} is not available on all platforms. Please refer to \nameref{sec:packages}.
+  \item Droplet S3:
+  \begin{itemize}
+    \item The Droplet S3 backend can only be used with virtual-hosted-style buckets,
+          i.e. buckets accessed like \path|http://<bucket>.<s3-server>/object|.
+          Path-style buckets are not supported.
+          It has been tested successfully with AWS S3 and CEPH Object Gateway S3.
+  \end{itemize}
+\end{itemize}
+
+\subsection{Installation}
+
+Install the package \package{bareos-storage-droplet} including its requirements
+by using an appropriate package management tool
+(e.g. \command{yum}, \command{zypper}).
+
+\subsection{Configuration}
+
+The droplet backend requires a \bareosDir \nameref{DirectorResourceStorage}, a \bareosSd \nameref{StorageResourceDevice}, as well as a Droplet profile file in which your access and secret keys and other parameters for the connection to your object storage are stored.
+
+\subsubsection{AWS S3}
+\label{sec:DropletAwsS3}
+
+\subsubsubsection{Director}
+
+First, we will create the new \bareosDir \nameref{DirectorResourceStorage}.
+
+For the following example, we
+\begin{itemize}
+  \item choose the name \resourcename{Dir}{Storage}{S3_Object}.
+  \item choose \resourceDirectiveValue{Dir}{Storage}{Media Type}{S3_Object1}. We name it this way in case we later add more separate Object Storages that do not have access to the same volumes.
+  \item assume the \bareosSd is located on the host \host{bareos-sd.example.com} and will offer the \nameref{StorageResourceDevice} \resourcename{Sd}{Device}{AWS_S3_1-00} (to be configured in the next section).
+\end{itemize}
+
+\begin{bareosConfigResource}{bareos-dir}{storage}{S3\_Object}
+Storage {
+    Name = "S3_Object"
+    Address = "bareos-sd.example.com"
+    Password = "secret"
+    Device = "AWS_S3_1-00"
+    Media Type = "S3_Object1"
+}
+\end{bareosConfigResource}
+
+These credentials are only used to connect to the \bareosSd. The credentials to access the object store (e.g. S3) are stored in the \bareosSd Droplet profile.
+
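+For completeness (this is not part of the original example), such a storage is typically
+referenced from a \bareosDir pool. The following is a minimal, untested sketch; the pool name,
+retention and size limits are only placeholders and must be adapted to your environment:
+
+\begin{bareosConfigResource}{bareos-dir}{pool}{S3\_Full}
+Pool {
+  Name = "S3_Full"
+  Pool Type = Backup
+  Storage = "S3_Object"          # the Storage resource defined above
+  Label Format = "Full-"         # let Bareos create and label volumes automatically
+  Maximum Volume Bytes = 50G     # a volume becomes a directory of chunk files in the bucket
+  Volume Retention = 365 days
+  Auto Prune = yes
+  Recycle = yes
+}
+\end{bareosConfigResource}
+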
+\subsubsubsection{Storage Daemon}
+
+In your \bareosSd configuration, we need to set up a new device that acts as a link to the Object Storage backend.
+
+The name and media type must correspond to those settings in the \bareosDir \nameref{DirectorResourceStorage}:
+
+\begin{itemize}
+  \item \linkResourceDirective{Sd}{Device}{Name} = \linkResourceDirective{Dir}{Storage}{Device}
+  \item \linkResourceDirective{Sd}{Device}{Media Type} = \linkResourceDirective{Dir}{Storage}{Media Type}
+\end{itemize}
+
+A device for AWS S3 object storage, using a bucket named \path|backup-bareos| located in EU West 2 (London), could look like this:
+
+\begin{bareosConfigResource}{bareos-sd}{device}{AWS\_S3\_1-00}
+Device {
+  Name = "AWS_S3_1-00"
+  Media Type = "S3_Object1"
+  Archive Device = "AWS S3 Storage"
+  Device Type = droplet
+  Device Options = "profile=/etc/bareos/bareos-sd.d/droplet/aws.profile.conf,bucket=backup-bareos,location=eu-west-2,chunksize=100M"
+  LabelMedia = yes                 # Lets Bareos label unlabeled media
+  Random Access = yes
+  AutomaticMount = yes             # When device opened, read it
+  RemovableMedia = no
+  AlwaysOpen = no
+  Maximum File Size = 500M         # 500 MB (allows for seeking to small portions of the Volume)
+  Maximum Concurrent Jobs = 1
+  Maximum Spool Size = 15000M
+}
+\end{bareosConfigResource}
+
+In these examples, all the backup data is placed in the \path|backup-bareos| bucket on the defined S3 storage.
+In contrast to other \bareosSd backends, a Bareos volume is not represented by a single file.
+Instead, a volume is a sub-directory in the defined bucket,
+and every chunk is placed in this volume directory with a filename from 0000 to 9999
+and a size defined by the \path|chunksize| option.
+It is implemented this way because S3 only allows reading complete files,
+so every append operation would otherwise require reading the full volume file again.
+
+The following \linkResourceDirective{Sd}{Device}{Device Options} settings are possible:
+
+\begin{description}
+\item[profile] Droplet profile path (e.g. /etc/bareos/bareos-sd.d/droplet/droplet.profile.conf). Make sure the profile file is readable for the user \user{bareos}.
+\item[location] Optional, but required for AWS storage (e.g. eu-west-2).
+\item[acl] Canned ACL.
+\item[storageclass] Storage class to use.
+\item[bucket] Bucket to store objects in.
+\item[chunksize] Size of volume chunks (default = 10 MB).
+\item[iothreads] Number of IO-threads to use for uploads (if not set, blocking uploads are used).
+\item[ioslots] Number of IO-slots per IO-thread (default 10). Set this to $>=$ 1 for cached and to 0 for direct writing.
+\item[retries] Number of write tries before discarding a job. Set this to 0 for unlimited retries. Setting this to anything other than 0 will cause data loss if the backend is not available, so be very careful.
+\item[mmap] Use mmap to allocate chunk memory instead of malloc().
+\end{description}
+
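+For example, to switch the AWS device shown above from blocking to cached (asynchronous) uploads,
+only its \linkResourceDirective{Sd}{Device}{Device Options} line needs to be changed.
+This is an untested sketch; the thread and slot counts are only examples:
+
+\begin{bareosConfigResource}{bareos-sd}{device}{AWS\_S3\_1-00 (cached writing)}
+  # cached writing: 3 IO-threads with 10 slots each, unlimited retries
+  Device Options = "profile=/etc/bareos/bareos-sd.d/droplet/aws.profile.conf,bucket=backup-bareos,location=eu-west-2,chunksize=100M,iothreads=3,ioslots=10,retries=0"
+\end{bareosConfigResource}
+
+Keep \path|retries=0| (unlimited) in such a setup, as any other value may silently discard data when the backend is unavailable.
+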
+Create the Droplet profile to be used.
+This profile is used later by the droplet library when accessing your cloud storage.
+
+An example for AWS S3 could look like this:
+
+\begin{bareosConfigResource}{bareos-sd}{droplet}{aws.profile}
+use_https = false         # Default is false. If set to true, you may use the SSL parameters given in the droplet configuration wiki, see below.
+host = s3.amazonaws.com   # This parameter is only used as the base URL and will be combined with the bucket and location set in the device resource to form the correct URL.
+access_key = myaccesskey
+secret_key = mysecretkey
+pricing_dir = ""          # If not empty, a droplet.csv file will be created which will record all S3 operations.
+backend = s3
+aws_auth_sign_version = 4 # Currently, AWS S3 uses version 4. The Ceph S3 gateway uses version 2.
+\end{bareosConfigResource}
+
+More arguments and the SSL parameters can be found in the documentation of the droplet library:
+\url{https://github.com/scality/Droplet/wiki/Configuration-File}
+
+\subsubsection{CEPH Object Gateway S3}
+
+Please note that there is also the \nameref{SdBackendRados} backend,
+which can back up to CEPH directly. However, currently (17.2.7) the \sdBackend{Droplet}{S3} backend is known to outperform the \sdBackend{Rados}{} backend.
+
+The parameters have been explained in the \nameref{sec:DropletAwsS3} section; the following gives an example of how to back up to a CEPH Object Gateway S3.
+
+\begin{bareosConfigResource}{bareos-dir}{storage}{S3\_Object}
+Storage {
+    Name = "S3_Object"
+    Address = "bareos-sd.example.com"
+    Password = "secret"
+    Device = "CEPH_1-00"
+    Media Type = "S3_Object1"
+}
+\end{bareosConfigResource}
+
+A device for CEPH object storage could look like this:
+
+\begin{bareosConfigResource}{bareos-sd}{device}{CEPH\_1-00}
+Device {
+  Name = "CEPH_1-00"
+  Media Type = "S3_Object1"
+  Archive Device = "Object S3 Storage"
+  Device Type = droplet
+  Device Options = "profile=/etc/bareos/bareos-sd.d/droplet/ceph.profile,bucket=backup-bareos,chunksize=100M"
+  LabelMedia = yes                 # Lets Bareos label unlabeled media
+  Random Access = yes
+  AutomaticMount = yes             # When device opened, read it
+  RemovableMedia = no
+  AlwaysOpen = no
+  Maximum File Size = 500M         # 500 MB (allows for seeking to small portions of the Volume)
+  Maximum Concurrent Jobs = 1
+  Maximum Spool Size = 15000M
+}
+\end{bareosConfigResource}
+
+The corresponding Droplet profile for CEPH would be:
+
+\begin{bareosConfigResource}{bareos-sd}{droplet}{ceph.profile}
+use_https = false
+host = CEPH-host.example.com
+access_key = myaccesskey
+secret_key = mysecretkey
+pricing_dir = ""
+backend = s3
+aws_auth_sign_version = 2
+\end{bareosConfigResource}
+
+The main differences are that a location is not required and that the profile sets \path|aws_auth_sign_version = 2| instead of 4.
+
+\subsection{Troubleshooting}
+
+\hide{
+\subsubsection{S3 Backend Unreachable}
+
+The droplet device can run in two modes:
+
+\begin{itemize}
+  \item direct writing \path|(iothreads = 0)|
+  \item cached writing \path|(iothreads >= 1)|
+\end{itemize}
+
+If \path|iothreads >= 1, retries = 0| (unlimited retries) and the \sdBackend{Droplet}{} backend (e.g. S3 storage) is not available, a job will continue running until the backend problem is fixed.
+If this is the case and the job is canceled, it will only be canceled on the Director. It continues running on the Storage Daemon until the S3 backend is available again or the Storage Daemon itself is restarted.
+
+If \path|iothreads >= 1, retries != 0| and the droplet backend (e.g. S3 storage) is not available, write operations will be silently discarded after the specified number of retries.
+
+\warning{This combination of options is dangerous. Don't use it.}
+
+%Caching when S3 backend is not available:
+%This behaviour have not changed, but I fear problems can arise, if the backend is not available and all write operations are stored in memory.
+}
+
+\subsubsection{iothreads}
+
+For testing, the following \linkResourceDirective{Sd}{Device}{Device Options} should be used:
+\begin{itemize}
+  \item \path|iothreads=0|
+  \item \path|retries=1|
+\end{itemize}
+
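+Assuming the AWS S3 device and the pool names used above, a first functional test could then
+look like this (an untested sketch; volume, storage and pool names are only examples):
+
+\begin{bconsole}{label a test volume on the S3 device}
+*label volume=Full-0001 storage=S3_Object pool=Full
+*status storage=S3_Object
+\end{bconsole}
+
+If labeling succeeds, the first chunk files of the new volume should show up in the bucket below the directory \path|Full-0001/|.
+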
+If the S3 backend becomes or is unreachable, the storage daemon will behave depending on \argument{iothreads} and \argument{retries}.
+When the storage daemon is using cached writing (\argument{iothreads} $>= 1$) and \argument{retries} is set to zero (unlimited tries), the job will continue running until the backend becomes available again. The job cannot be canceled in this case, as the storage daemon will continuously try to write the cached files.
+
+Great caution should be used when using \argument{retries} $> 0$ combined with cached writing. If the backend becomes unavailable and the storage daemon reaches the predefined number of tries, the job will be discarded silently yet marked as \path|OK| in the \bareosDir.
+
+You can always check the status of the writing process by using \bcommand{status}{storage=...}. The current writing status will then be displayed:
+\begin{bconsole}{status storage}
+...
+Device "S3_ObjectStorage" (S3) is mounted with:
+    Volume:     Full-0085
+    Pool:       Full
+    Media type: S3_Object1
+Backend connection is working.
+Inflight chunks: 2
+Pending IO flush requests:
+   /Full-0085/0002 - 10485760 (try=0)
+   /Full-0085/0003 - 10485760 (try=0)
+   /Full-0085/0004 - 10485760 (try=0)
+...
+Attached Jobs: 175
+...
+\end{bconsole}
+\argument{Pending IO flush requests} means that there is data to be written. \argument{try}=0 means that this is the first try and no problem has occurred. If \argument{try} $>0$, problems occurred and the storage daemon will continue trying.
+
+Status without pending IO chunks:
+\begin{bconsole}{status storage}
+...
+Device "S3_ObjectStorage" (S3) is mounted with:
+    Volume:     Full-0084
+    Pool:       Full
+    Media type: S3_Object1
+Backend connection is working.
+No Pending IO flush requests.
+Configured device capabilities:
+  EOF BSR BSF FSR FSF EOM !REM RACCESS AUTOMOUNT LABEL !ANONVOLS !ALWAYSOPEN
+Device state:
+  OPENED !TAPE LABEL !MALLOC APPEND !READ EOT !WEOT !EOF !NEXTVOL !SHORT MOUNTED
+  num_writers=0 reserves=0 block=8
+Attached Jobs:
+...
+\end{bconsole}
+
+\subsubsection{AWS S3 Logging}
+
+If you use AWS S3 object storage and want to debug your Bareos setup, it is recommended to turn on server access logging in your bucket properties. This will show whether Bareos attempts to write to your bucket at all.
diff --git a/docs/manuals/en/main/storage-backends.tex b/docs/manuals/en/main/storage-backends.tex
new file mode 100644
index 00000000000..c18c429657e
--- /dev/null
+++ b/docs/manuals/en/main/storage-backends.tex
@@ -0,0 +1,55 @@
+\chapter{Storage Backends}
+
+A Bareos Storage Daemon can use various storage backends:
+
+\begin{description}
+\item [\sdBackend{Tape}{}] is used to access a tape device and thus has sequential access.
+\item [\sdBackend{File}{}]
+  tells Bareos that the device is a file. It may either be a
+  file defined on a fixed medium or a removable filesystem such as
+  USB. All files must be random access devices.
+\item [\sdBackend{Fifo}{}] is a first-in-first-out sequential access read-only
+  or write-only device.
+\item [\sdBackend{GFAPI}{GlusterFS}] is used to access a GlusterFS storage.
+\item [\sdBackend{Rados}{Ceph Object Store}] is used to access a Ceph object store.
+\item [\sdBackend{Droplet}{}] is used to access an object store supported by \package{libdroplet}, most notably S3.
+  For details, refer to \nameref{SdBackendDroplet}.
+\end{description}
+
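+Which backend a device uses is selected by the \linkResourceDirective{Sd}{Device}{Device Type}
+directive; some backends additionally need \linkResourceDirective{Sd}{Device}{Device Options}.
+As a schematic, untested fragment (all values are placeholders):
+
+\begin{bareosConfigResource}{bareos-sd}{device}{ExampleBackendDevice}
+Device {
+  Name = "ExampleBackendDevice"
+  Media Type = "ExampleMediaType"
+  Archive Device = "Example Storage"  # description (object storage backends are configured via Device Options)
+  Device Type = droplet               # selects the backend, e.g. droplet, gfapi or rados
+  Device Options = "..."              # backend specific, see the following sections
+}
+\end{bareosConfigResource}
+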
+
+\input{storage-backend-droplet}
+
+
+\section{GFAPI Storage Backend}
+\label{SdBackendGfapi}
+
+\sdBackend{GFAPI}{GlusterFS}
+
+A GlusterFS storage system can be used as a storage backend for Bareos.
+Prerequisites are a working GlusterFS storage system and the package \package{bareos-storage-glusterfs}.
+See \url{http://www.gluster.org/} for more information regarding GlusterFS installation and configuration
+and specifically \url{https://gluster.readthedocs.org/en/latest/Administrator Guide/Bareos/}
+for Bareos integration.
+You can use the following snippet to configure it as a storage device:
+\bconfigInput{config/SdDeviceDeviceOptionsGfapi1.conf}
+Adapt the server and volume name to your environment.
+
+\sinceVersion{sd}{GlusterFS Storage}{15.2.0}
+
+
+\section{Rados Storage Backend}
+\label{SdBackendRados}
+
+\sdBackend{Rados}{Ceph Object Store}
+
+Here you configure the Ceph object store, which is accessed by the SD using the Rados library.
+Prerequisites are a working Ceph object store and the package \package{bareos-storage-ceph}.
+See \url{http://ceph.com} for more information regarding Ceph installation and configuration.
+Assuming that you have an object store with the name \file{poolname}
+and your Ceph access is configured in \file{/etc/ceph/ceph.conf},
+you can use the following snippet to configure it as a storage device:
+\bconfigInput{config/SdDeviceDeviceOptionsRados1.conf}
+
+\sinceVersion{sd}{Ceph Storage}{15.2.0}
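+
+As with the Droplet backend, the \bareosDir also needs a \nameref{DirectorResourceStorage} that
+points to the \bareosSd device using this backend. The following is a minimal, untested sketch;
+the device and media type names are placeholders and must match your \bareosSd device configuration:
+
+\begin{bareosConfigResource}{bareos-dir}{storage}{Rados}
+Storage {
+  Name = "Rados"
+  Address = "bareos-sd.example.com"   # Bareos Storage Daemon host
+  Password = "secret"
+  Device = "RadosStorage"             # must match the Name of the Sd Device
+  Media Type = "RadosFile"            # must match the Media Type of the Sd Device
+}
+\end{bareosConfigResource}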