Skip to content

Commit

Permalink
Merge pull request #290 from pstorz/dev/pstorz/master/backedup-spell-fix
Browse files Browse the repository at this point in the history
spelling: fixed the wrong "backuped" to "backed up"
  • Loading branch information
franku committed Oct 2, 2019
2 parents 28e5350 + d8bf7f9 commit f697110
Show file tree
Hide file tree
Showing 195 changed files with 203 additions and 203 deletions.
6 changes: 3 additions & 3 deletions core/README.NDMP
Expand Up @@ -31,7 +31,7 @@ The core fileindex handling code is rewritten to use callback
functions for doing the real work. This way we can hook in
internal functions into the core file indexing process which
happens after a backup and before a restore to fill the
files which have been backuped or restored.
files which have been backed up or restored.

Some missing initialization, commission and decommission is
added although it is empty it is better to have a consistent
Expand Down Expand Up @@ -280,8 +280,8 @@ Client {
Port = 10000
Protocol = NDMPv4 # Need to specify protocol before password as protocol determines password encoding used.
Auth Type = Clear # Clear == Clear Text, MD5 == Challenge protocol
Username = "ndmp" # username of the NDMP user on the DATA AGENT e.g. storage box being backuped.
Password = "test" # password of the NDMP user on the DATA AGENT e.g. storage box being backuped.
Username = "ndmp" # username of the NDMP user on the DATA AGENT e.g. storage box being backed up.
Password = "test" # password of the NDMP user on the DATA AGENT e.g. storage box being backed up.
}

#
Expand Down
2 changes: 1 addition & 1 deletion core/src/dird/ndmp_dma_generic.cc
Expand Up @@ -336,7 +336,7 @@ bool NdmpBuildClientJob(JobControlRecord* jcr,
}

/*
* The data_agent is the client being backuped or restored using NDMP.
* The data_agent is the client being backed up or restored using NDMP.
*/
ASSERT(client->password_.encoding == p_encoding_clear);
if (!fill_ndmp_agent_config(jcr, &job->data_agent, client->Protocol,
Expand Down
2 changes: 1 addition & 1 deletion core/src/dird/ndmp_dma_restore_NDMP_BAREOS.cc
Expand Up @@ -241,7 +241,7 @@ static inline bool fill_restore_environment(JobControlRecord* jcr,
}

/*
* Tell the data engine what was backuped.
* Tell the data engine what was backed up.
*/
pv.name = ndmp_env_keywords[NDMP_ENV_KW_FILESYSTEM];
pv.value = ndmp_filesystem;
Expand Down
2 changes: 1 addition & 1 deletion core/src/filed/accurate.cc
Expand Up @@ -138,7 +138,7 @@ bool AccurateFinish(JobControlRecord* jcr)

/**
* This function is called for each file seen in fileset.
* We check in file_list hash if fname have been backuped
* We check in file_list hash if fname have been backed up
* the last time. After we can compare Lstat field.
* Full Lstat usage have been removed on 6612
*
Expand Down
2 changes: 1 addition & 1 deletion core/src/filed/fd_plugins.cc
Expand Up @@ -2472,7 +2472,7 @@ static bRC bareosNewPreInclude(bpContext* ctx)
}

/**
* Check if a file have to be backuped using Accurate code
* Check if a file have to be backed up using Accurate code
*/
static bRC bareosCheckChanges(bpContext* ctx, struct save_pkt* sp)
{
Expand Down
2 changes: 1 addition & 1 deletion core/src/findlib/shadowing.cc
Expand Up @@ -26,7 +26,7 @@
/**
* @file
* Detect fileset shadowing e.g. when an include entry pulls in data
* which is already being backuped by another include pattern. Currently
* which is already being backed up by another include pattern. Currently
* we support both local and global shadowing. Where local shadowing is
* when the shadowing occurs within one include block and global when
* between multiple include blocks.
Expand Down
2 changes: 1 addition & 1 deletion core/src/plugins/filed/BareosFdPluginLDAP.py
Expand Up @@ -313,7 +313,7 @@ def to_unix_timestamp(self, context, timestamp):

def get_next_file_to_backup(self, context, savepkt):
'''
Find out the next file that should be backuped
Find out the next file that should be backed up
'''
# When file_to_backup is not None we should return the LDIF.
if self.file_to_backup:
Expand Down
Expand Up @@ -7,7 +7,7 @@ FileSet {
xattrsupport = yes
}
# adapt this to your environment
# basedir: optional, otherwise all data will be backuped
# basedir: optional, otherwise all data will be backed up
Plugin = "cephfs:conffile=<ceph_conf_file>:basedir=<basedir>:"
}
}
Expand Down
2 changes: 1 addition & 1 deletion core/src/plugins/filed/python-fd.cc
Expand Up @@ -2943,7 +2943,7 @@ static PyObject* PyBareosNewPreInclude(PyObject* self, PyObject* args)

/**
* Callback function which is exposed as a part of the additional methods which
* allow a Python plugin to issue a check if a file have to be backuped using
* allow a Python plugin to issue a check if a file have to be backed up using
* Accurate code.
*/
static PyObject* PyBareosCheckChanges(PyObject* self, PyObject* args)
Expand Down
2 changes: 1 addition & 1 deletion core/src/plugins/filed/python-fd.h
Expand Up @@ -732,7 +732,7 @@ static PyMethodDef BareosFDMethods[] = {
{"NewPreInclude", PyBareosNewPreInclude, METH_VARARGS,
"Add new pre include block"},
{"CheckChanges", PyBareosCheckChanges, METH_VARARGS,
"Check if a file have to be backuped using Accurate code"},
"Check if a file have to be backed up using Accurate code"},
{"AcceptFile", PyBareosAcceptFile, METH_VARARGS,
"Check if a file would be saved using current Include/Exclude code"},
{"SetSeenBitmap", PyBareosSetSeenBitmap, METH_VARARGS,
Expand Down
2 changes: 1 addition & 1 deletion docs/manuals/source/Appendix/BackwardCompatibility.rst
Expand Up @@ -127,7 +127,7 @@ Proceed with the following steps:
/usr/lib/bareos/create_bareos_database
- Insert backuped db into new database:
- Insert backed up db into new database:

.. code-block:: shell-session
Expand Down
2 changes: 1 addition & 1 deletion docs/manuals/source/Appendix/Howtos.rst
Expand Up @@ -186,7 +186,7 @@ instance
Defines the instance within the database server.

database
Defines the database that should get backuped.
Defines the database that should get backed up.

username and password
Username and Password are required, when the connection is done using a MSSQL user. If the system account the bareos-fd runs with has sufficient permissions, this is not required.
Expand Down
6 changes: 3 additions & 3 deletions docs/manuals/source/DeveloperGuide/api.rst
Expand Up @@ -482,7 +482,7 @@ Bvfs API
--------

The BVFS (Bareos Virtual File System) do provide a API for browsing the
backuped files in the catalog and select files for restoring.
backed up files in the catalog and select files for restoring.

The Bvfs module works correctly with BaseJobs, Copy and Migration jobs.

Expand Down Expand Up @@ -822,7 +822,7 @@ Example for directory browsing using bvfs
130 0 0 A A A A A A A A A A A A A A ..
1 23 123 z GiuU EH9 C GHH GHH A BAA BAA I BWA5Px BaIDUN BaIDUN A A C sbin/

# the first really backuped path is /sbin/ (pathid=1)
# the first really backed up path is /sbin/ (pathid=1)
# as it has values other than 0 for FileId, JobId and LStat.
# Now we check, if it has further subdirectories.
*.bvfs_lsdir jobid=1 pathid=1
Expand All @@ -843,4 +843,4 @@ Example for directory browsing using bvfs
1 18 123 z Gli+ IHo B GHH GHH A NVkY BAA BrA BaIDUJ BaIDUJ BaIDUJ A A C 928EB+EJGFtWD7wQ8bVjew Full-0001 0
1 1067 127 z Glnc IHo B GHH GHH A NVkY BAA BrA BaKDT2 BaKDT2 BaKDT2 A A C 928EB+EJGFtWD7wQ8bVjew Incremental-0007 0

# multiple versions of the file bareos-dir have been backuped.
# multiple versions of the file bareos-dir have been backed up.
Expand Up @@ -604,7 +604,7 @@ Job Retention = <time-period-specification>
:index:`\ <single: Job; Retention>`\ :index:`\ <single: Retention; Job>`\ The Job Retention record defines the length of time that Bareos will keep Job records in the Catalog database. When this time period expires, and if AutoPrune is set to yes Bareos will prune (remove) Job records that are older than the specified Job Retention period. Note, if a Job record is selected for pruning, all associated File and JobMedia records will also be pruned regardless of the File Retention
period set. As a consequence, you normally will set the File retention period to be less than the Job retention period.

As mentioned above, once the File records are removed from the database, you will no longer be able to restore individual files from the Job. However, as long as the Job record remains in the database, you will be able to restore all the files backuped for the Job. As a consequence, it is generally a good idea to retain the Job records much longer than the File records.
As mentioned above, once the File records are removed from the database, you will no longer be able to restore individual files from the Job. However, as long as the Job record remains in the database, you will be able to restore all the files backed up for the Job. As a consequence, it is generally a good idea to retain the Job records much longer than the File records.

The retention period is specified in seconds, but as a convenience, there are a number of modifiers that permit easy specification in terms of minutes, hours, days, weeks, months, quarters, or years. See the :ref:`Configuration chapter <Time>` of this manual for additional details of modifier specification.

Expand Down
Expand Up @@ -8,7 +8,7 @@ File Deduplication using Base Jobs
A base job is sort of like a Full save except that you will want the FileSet to contain only files that are unlikely to change in the future (i.e. a snapshot of most of your system after installing it). After the base job has been run, when you are doing a Full save, you specify one or more Base jobs to be used. All files that have been backed up in the Base job/jobs but not modified will then be excluded from the backup. During a restore, the Base jobs will be automatically pulled in where
necessary.

Imagine having 100 nearly identical Windows or Linux machine containing the OS and user files. Now for the OS part, a Base job will be backed up once, and rather than making 100 copies of the OS, there will be only one. If one or more of the systems have some files updated, no problem, they will be automatically backuped.
Imagine having 100 nearly identical Windows or Linux machine containing the OS and user files. Now for the OS part, a Base job will be backed up once, and rather than making 100 copies of the OS, there will be only one. If one or more of the systems have some files updated, no problem, they will be automatically backed up.

A new Job directive :strong:`Base=JobX,JobY,...`\ permits to specify the list of files that will be used during Full backup as base.

Expand Down
Expand Up @@ -246,8 +246,8 @@ In our example we connect to a Isilon storage appliance emulator:
Port = 10000 # Default port for NDMP
Protocol = NDMPv4 # Need to specify protocol before password as protocol determines password encoding used
Auth Type = Clear # Cleartext Authentication
Username = "ndmpadmin" # username of the NDMP user on the DATA AGENT e.g. storage box being backuped.
Password = "secret" # password of the NDMP user on the DATA AGENT e.g. storage box being backuped.
Username = "ndmpadmin" # username of the NDMP user on the DATA AGENT e.g. storage box being backed up.
Password = "secret" # password of the NDMP user on the DATA AGENT e.g. storage box being backed up.
}
Verify, that you can access your Primary Storage System via Bareos:
Expand Down Expand Up @@ -1395,7 +1395,7 @@ To be able to do scheduled backups, we need to configure a backup job that will
FileSet = isilon
}
As we also need to be able to do a restore of the backuped data, we also need to define an adequate restore job:
As we also need to be able to do a restore of the backed up data, we also need to define an adequate restore job:

.. code-block:: bareosconfig
:caption: bareos-dir.d/Job/ndmp-native-restore-job.conf
Expand Down
2 changes: 1 addition & 1 deletion regress/tests/1-example-test
Expand Up @@ -29,7 +29,7 @@ BackupDirectory="${tmp}/data"
# Remove old configuration, working and tmp files. Setup the database.
cleanup

# Use a tgz to setup data to be backuped.
# Use a tgz to setup data to be backed up.
# Data will be placed at "${tmp}/data".
setup_data data/small.tgz

Expand Down
2 changes: 1 addition & 1 deletion regress/tests/2drive-2disk
Expand Up @@ -16,7 +16,7 @@ BackupDirectory="${tmp}/data"

scripts/cleanup

# Use a tgz to setup data to be backuped.
# Use a tgz to setup data to be backed up.
# Data will be placed at "${tmp}/data/".
setup_data data/small.tgz

Expand Down
2 changes: 1 addition & 1 deletion regress/tests/2drive-2job-test
Expand Up @@ -16,7 +16,7 @@ BackupDirectory="${tmp}/data"

scripts/cleanup

# Use a tgz to setup data to be backuped.
# Use a tgz to setup data to be backed up.
# Data will be placed at "${tmp}/data/".
setup_data data/small.tgz

Expand Down
2 changes: 1 addition & 1 deletion regress/tests/2drive-3pool-test
Expand Up @@ -25,7 +25,7 @@ start_test
# This directory will be created by setup_data().
BackupDirectory="${tmp}/data"

# Use a tgz to setup data to be backuped.
# Use a tgz to setup data to be backed up.
# Data will be placed at "${tmp}/data/".
setup_data data/small.tgz

Expand Down
2 changes: 1 addition & 1 deletion regress/tests/2drive-concurrent-test
Expand Up @@ -21,7 +21,7 @@ start_test
# This directory will be created by setup_data().
BackupDirectory="${tmp}/data"

# Use a tgz to setup data to be backuped.
# Use a tgz to setup data to be backed up.
# Data will be placed at "${tmp}/data/".
setup_data data/small.tgz

Expand Down
2 changes: 1 addition & 1 deletion regress/tests/2drive-incremental-2disk
Expand Up @@ -29,7 +29,7 @@ start_test
# This directory will be created by setup_data().
BackupDirectory="${tmp}/data"

# Use a tgz to setup data to be backuped.
# Use a tgz to setup data to be backed up.
# Data will be placed at "${tmp}/data/".
setup_data data/flat-c.tgz

Expand Down
2 changes: 1 addition & 1 deletion regress/tests/2drive-offline-test
Expand Up @@ -24,7 +24,7 @@ start_test
# This directory will be created by setup_data().
BackupDirectory="${tmp}/data"

# Use a tgz to setup data to be backuped.
# Use a tgz to setup data to be backed up.
# Data will be placed at "${tmp}/data/".
setup_data data/small.tgz

Expand Down
2 changes: 1 addition & 1 deletion regress/tests/2drive-swap-test
Expand Up @@ -21,7 +21,7 @@ start_test
# This directory will be created by setup_data().
BackupDirectory="${tmp}/data"

# Use a tgz to setup data to be backuped.
# Use a tgz to setup data to be backed up.
# Data will be placed at "${tmp}/data/".
setup_data data/small.tgz

Expand Down
2 changes: 1 addition & 1 deletion regress/tests/accurate-test
Expand Up @@ -35,7 +35,7 @@ BackupDirectory="${tmp}/data"
# Remove old configuration, working and tmp files. Setup the database.
cleanup

# Use a tgz to setup data to be backuped.
# Use a tgz to setup data to be backed up.
# Data will be placed at "${tmp}/data".
setup_data data/small.tgz

Expand Down
2 changes: 1 addition & 1 deletion regress/tests/acl-xattr-test
Expand Up @@ -116,7 +116,7 @@ cp ${conf}/bareos-dir.conf $cwd/tmp/1
sed -f ${outf} ${cwd}/tmp/1 >${conf}/bareos-dir.conf


# Use a tgz to setup data to be backuped.
# Use a tgz to setup data to be backed up.
# Data will be placed at "${tmp}/data".
setup_data data/small.tgz

Expand Down
2 changes: 1 addition & 1 deletion regress/tests/action-on-purge-test
Expand Up @@ -23,7 +23,7 @@ scripts/copy-test-confs
sed 's/Pool Type = Backup/Pool Type = Backup; ActionOnPurge = Truncate/' $conf/bareos-dir.conf > $tmp/1
cp $tmp/1 $conf/bareos-dir.conf

# Use a tgz to setup data to be backuped.
# Use a tgz to setup data to be backed up.
# Data will be placed at "${tmp}/data".
setup_data data/small.tgz

Expand Down
2 changes: 1 addition & 1 deletion regress/tests/allowcompress-test
Expand Up @@ -15,7 +15,7 @@ scripts/copy-test-confs
# This directory will be created by setup_data().
BackupDirectory="${tmp}/data"

# Use a tgz to setup data to be backuped.
# Use a tgz to setup data to be backed up.
# Data will be placed at "${tmp}/data/".
setup_data data/small.tgz

Expand Down
2 changes: 1 addition & 1 deletion regress/tests/always-incremental-test
Expand Up @@ -16,7 +16,7 @@ BackupDirectory="${tmp}/data"
# Remove old configuration, working and tmp files. Setup the database.
cleanup

# Use a tgz to setup data to be backuped.
# Use a tgz to setup data to be backed up.
# Data will be placed at "${tmp}/data/".
setup_data data/small.tgz

Expand Down
2 changes: 1 addition & 1 deletion regress/tests/ansi-label-tape
Expand Up @@ -22,7 +22,7 @@ scripts/copy-tape-confs
# This directory will be created by setup_data().
BackupDirectory="${tmp}/data"

# Use a tgz to setup data to be backuped.
# Use a tgz to setup data to be backed up.
# Data will be placed at "${tmp}/data/".
setup_data data/small.tgz

Expand Down
2 changes: 1 addition & 1 deletion regress/tests/auto-label-test
Expand Up @@ -13,7 +13,7 @@ copy_test_confs
# This directory will be created by setup_data().
BackupDirectory="${tmp}/data"

# Use a tgz to setup data to be backuped.
# Use a tgz to setup data to be backed up.
# Data will be placed at "${tmp}/data/".
setup_data data/small.tgz

Expand Down
Expand Up @@ -14,7 +14,7 @@ scripts/copy-confs
# This directory will be created by setup_data().
BackupDirectory="${tmp}/data"

# Use a tgz to setup data to be backuped.
# Use a tgz to setup data to be backed up.
# Data will be placed at "${tmp}/data/".
setup_data data/small.tgz

Expand Down
2 changes: 1 addition & 1 deletion regress/tests/backup-bareos-passive-test
Expand Up @@ -31,7 +31,7 @@ change_jobname BackupClient1FileList $JobName
# This directory will be created by setup_data().
BackupDirectory="${tmp}/data"

# Use a tgz to setup data to be backuped.
# Use a tgz to setup data to be backed up.
# Data will be placed at "${tmp}/data/".
setup_data data/small.tgz

Expand Down
2 changes: 1 addition & 1 deletion regress/tests/backup-bareos-tape
Expand Up @@ -19,7 +19,7 @@ scripts/copy-tape-confs
# This directory will be created by setup_data().
BackupDirectory="${tmp}/data"

# Use a tgz to setup data to be backuped.
# Use a tgz to setup data to be backed up.
# Data will be placed at "${tmp}/data/".
setup_data data/small.tgz

Expand Down
2 changes: 1 addition & 1 deletion regress/tests/backup-bareos-test
Expand Up @@ -25,7 +25,7 @@ change_jobname BackupClient1FileList $JobName
# This directory will be created by setup_data().
BackupDirectory="${tmp}/data"

# Use a tgz to setup data to be backuped.
# Use a tgz to setup data to be backed up.
# Data will be placed at "${tmp}/data/".
setup_data data/small.tgz

Expand Down
2 changes: 1 addition & 1 deletion regress/tests/backup-to-null
Expand Up @@ -19,7 +19,7 @@ scripts/copy-fifo-confs
# This directory will be created by setup_data().
BackupDirectory="${tmp}/data"

# Use a tgz to setup data to be backuped.
# Use a tgz to setup data to be backed up.
# Data will be placed at "${tmp}/data/".
setup_data data/small.tgz

Expand Down
2 changes: 1 addition & 1 deletion regress/tests/backup-win32-tape
Expand Up @@ -17,7 +17,7 @@ scripts/copy-win32-confs
# This directory will be created by setup_data().
BackupDirectory="${tmp}/data"

# Use a tgz to setup data to be backuped.
# Use a tgz to setup data to be backed up.
# Data will be placed at "${tmp}/data/".
setup_data data/small.tgz

Expand Down
2 changes: 1 addition & 1 deletion regress/tests/bad-label-changer
Expand Up @@ -27,7 +27,7 @@ change_jobname $JobName
# This directory will be created by setup_data().
BackupDirectory="${tmp}/data"

# Use a tgz to setup data to be backuped.
# Use a tgz to setup data to be backed up.
# Data will be placed at "${tmp}/data/".
setup_data data/small.tgz

Expand Down
2 changes: 1 addition & 1 deletion regress/tests/base-job-test
Expand Up @@ -31,7 +31,7 @@ BackupDirectory="${tmp}/data"
# Remove old configuration, working and tmp files. Setup the database.
cleanup

# Use a tgz to setup data to be backuped.
# Use a tgz to setup data to be backed up.
# Data will be placed at "${tmp}/data".
setup_data data/po.tgz

Expand Down
2 changes: 1 addition & 1 deletion regress/tests/bextract-test
Expand Up @@ -16,7 +16,7 @@ change_jobname NightlySave $JobName
# This directory will be created by setup_data().
BackupDirectory="${tmp}/data"

# Use a tgz to setup data to be backuped.
# Use a tgz to setup data to be backed up.
# Data will be placed at "${tmp}/data/".
setup_data data/small.tgz

Expand Down

0 comments on commit f697110

Please sign in to comment.