dird: add config directive and interface to director config
- added GetAllJobResourcesByClientName
- added ScheduleOnClientConnectInterval and documentation
- added tests
franku committed Nov 8, 2019
1 parent 3f2b5fb commit a8d69c0
Showing 26 changed files with 469 additions and 1 deletion.
20 changes: 20 additions & 0 deletions core/src/dird/dird_conf.cc
@@ -448,6 +448,9 @@ ResourceItem job_items[] = {
"If \"AlwaysIncrementalMaxFullAge\" is set, during consolidations only incremental backups will be considered while the Full Backup remains to reduce the amount of data being consolidated. Only if the Full Backup is older than \"AlwaysIncrementalMaxFullAge\", the Full Backup will be part of the consolidation to avoid the Full Backup becoming too old ." },
{ "MaxFullConsolidations", CFG_TYPE_PINT32, ITEM(res_job, MaxFullConsolidations), 0, CFG_ITEM_DEFAULT, "0", "16.2.4-",
"If \"AlwaysIncrementalMaxFullAge\" is configured, do not run more than \"MaxFullConsolidations\" consolidation jobs that include the Full backup."},
{ "ScheduleOnClientConnectInterval", CFG_TYPE_TIME, ITEM(res_job, ScheduleOnClientConnectInterval), 0, CFG_ITEM_DEFAULT, "0", "19.2.4-",
"The interval specifies the time between the most recent successful backup (counting from start time) and the "
"event of a client initiated connection. When this interval is exceeded the job is started automatically." },
{nullptr, 0, 0, nullptr, 0, 0, nullptr, nullptr, nullptr}
};
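For illustration, the check described by the new ScheduleOnClientConnectInterval entry boils down to a simple time comparison. The following sketch is not part of this commit; ShouldScheduleOnConnect and its parameters are hypothetical names used only to spell out the rule, with 0 leaving the directive disabled, matching the default above.

#include <ctime>

// Hypothetical illustration (not director code from this commit) of the rule
// the directive describes: a job becomes due on a client-initiated connection
// when the time since the start of the most recent successful backup exceeds
// the configured interval; an interval of 0 (the default) disables it.
static bool ShouldScheduleOnConnect(std::time_t last_successful_start,
                                    std::time_t now,
                                    std::time_t interval)
{
  if (interval == 0) { return false; }  // directive unset for this job
  return (now - last_successful_start) > interval;
}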

@@ -4191,4 +4194,21 @@ static bool SaveResource(int type, ResourceItem* items, int pass)
return true;
}

std::vector<JobResource*> GetAllJobResourcesByClientName(std::string name)
{
std::vector<JobResource*> all_matching_jobs;
JobResource* job{nullptr};

do {
job = static_cast<JobResource*>(my_config->GetNextRes(R_JOB, job));
if (job && job->client) {
if (std::string{job->client->resource_name_} == name) {
all_matching_jobs.push_back(job);
}
}
} while (job);

return all_matching_jobs;
}

} /* namespace directordaemon */
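As a usage sketch only (not part of this commit), a caller reacting to a client-initiated connection could combine the new lookup with the new directive in the same way the unit test below filters jobs. JobsToScheduleOnConnect is a hypothetical helper name, and <algorithm>, <string>, and <vector> are assumed to be included.

// Hypothetical usage sketch: collect the jobs of a connecting client that
// have ScheduleOnClientConnectInterval configured (non-zero), mirroring the
// filter used in client-initiated-reconnect.cc further down.
std::vector<JobResource*> JobsToScheduleOnConnect(const std::string& client_name)
{
  std::vector<JobResource*> jobs = GetAllJobResourcesByClientName(client_name);
  auto end = std::remove_if(jobs.begin(), jobs.end(), [](JobResource* job) {
    return job->ScheduleOnClientConnectInterval == 0;
  });
  jobs.erase(end, jobs.end());
  return jobs;
}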
4 changes: 3 additions & 1 deletion core/src/dird/dird_conf.h
@@ -474,7 +474,8 @@ class JobResource : public BareosResource {
utime_t DuplicateJobProximity = {0}; /**< Permitted time between duplicates */
utime_t AlwaysIncrementalJobRetention = {0}; /**< Time interval where incrementals are not consolidated */
utime_t AlwaysIncrementalMaxFullAge = {0}; /**< If Full Backup is older than this age
* * the consolidation job will include also the full */
* the consolidation job will include also the full */
utime_t ScheduleOnClientConnectInterval = {0};
int64_t spool_size = 0; /**< Size of spool file for this job */
int64_t max_bandwidth = 0; /**< Speed limit on this job */
int64_t FileHistSize = 0; /**< Hint about the size of the expected File history */
@@ -734,6 +735,7 @@ extern "C" char* job_code_callback_director(JobControlRecord* jcr, const char*);
const char* GetUsageStringForConsoleConfigureCommand();
void DestroyConfigureUsageString();
bool PopulateDefs();
std::vector<JobResource*> GetAllJobResourcesByClientName(std::string name);

} /* namespace directordaemon */
#endif // BAREOS_DIRD_DIRD_CONF_H_
21 changes: 21 additions & 0 deletions core/src/tests/CMakeLists.txt
@@ -385,3 +385,24 @@ target_link_libraries(test_watchdog_timer

gtest_discover_tests(test_watchdog_timer TEST_PREFIX gtest:)

####### client-initiated-reconnect #####################################
add_executable(client-initiated-reconnect client-initiated-reconnect.cc)

target_link_libraries(client-initiated-reconnect
dird_objects
bareos
bareosfind
bareoscats
bareossql
${LMDB_LIBS}
${NDMP_LIBS}
${GTEST_LIBRARIES}
${GTEST_MAIN_LIBRARIES}
)

IF(HAVE_PAM)
target_link_libraries(client-initiated-reconnect ${PAM_LIBRARIES})
ENDIF()

gtest_discover_tests(client-initiated-reconnect TEST_PREFIX gtest:)

89 changes: 89 additions & 0 deletions core/src/tests/client-initiated-reconnect.cc
@@ -0,0 +1,89 @@
/*
BAREOS® - Backup Archiving REcovery Open Sourced
Copyright (C) 2019-2019 Bareos GmbH & Co. KG
This program is Free Software; you can redistribute it and/or
modify it under the terms of version three of the GNU Affero General Public
License as published by the Free Software Foundation, which is
listed in the file LICENSE.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.
*/

#include "gtest/gtest.h"
#include "include/bareos.h"
#include "include/jcr.h"
#include "lib/parse_conf.h"
#include "dird/dird_globals.h"
#include "dird/dird_conf.h"
#include "dird/scheduler.h"
#include "dird/job.h"

#include <algorithm>
#include <string>
#include <vector>

namespace directordaemon {

bool DoReloadConfig() { return false; }

class ClientInitiatedReconnect : public ::testing::Test {
void SetUp() override;
void TearDown() override;
};

void ClientInitiatedReconnect::SetUp()
{
InitMsg(nullptr, nullptr);

std::string path_to_config_file = std::string(
PROJECT_SOURCE_DIR "/src/tests/configs/client-initiated-reconnect");
my_config = InitDirConfig(path_to_config_file.c_str(), M_ERROR_TERM);
my_config->ParseConfig();
}

void ClientInitiatedReconnect::TearDown() { delete my_config; }

static bool find(std::vector<JobResource*> jobs, std::string jobname)
{
return jobs.end() !=
std::find_if(jobs.begin(), jobs.end(), [&jobname](JobResource* job) {
return std::string{job->resource_name_} == jobname;
});
}

TEST_F(ClientInitiatedReconnect, find_all_jobs_for_client)
{
std::vector<JobResource*> jobs{GetAllJobResourcesByClientName("bareos-fd")};

EXPECT_EQ(jobs.size(), 4);

EXPECT_TRUE(find(jobs, "backup-bareos-fd"));
EXPECT_TRUE(find(jobs, "backup-bareos-fd-reconnect"));
EXPECT_TRUE(find(jobs, "backup-bareos-fd-reconnect-2"));
EXPECT_TRUE(find(jobs, "RestoreFiles"));
}

TEST_F(ClientInitiatedReconnect, find_all_connect_interval_jobs_for_client)
{
std::vector<JobResource*> jobs{GetAllJobResourcesByClientName("bareos-fd")};

auto end = std::remove_if(jobs.begin(), jobs.end(), [](JobResource* job) {
return job->ScheduleOnClientConnectInterval == 0;
});
jobs.erase(end, jobs.end());
EXPECT_EQ(jobs.size(), 2);

EXPECT_TRUE(find(jobs, "backup-bareos-fd-reconnect"));
EXPECT_TRUE(find(jobs, "backup-bareos-fd-reconnect-2"));
}

} // namespace directordaemon
@@ -0,0 +1,8 @@
Catalog {
Name = MyCatalog
#dbdriver = "postgresql"
dbdriver = "XXX_REPLACE_WITH_DATABASE_DRIVER_XXX"
dbname = "regress_backup_bareos_test"
dbuser = "regress"
dbpassword = ""
}
@@ -0,0 +1,31 @@
Client {
Name = bareos-fd
Description = "Client resource of the Director itself."
Address = localhost
Password = "fd_password" # password for FileDaemon
FD PORT = 42002
}

Client {
Name = bareos-fd-duplicate-interface
Description = "Client resource with identical network interface for setdebug testing"
Address = localhost
Password = "fd_password" # password for FileDaemon
FD PORT = 42002
}

Client {
Name = bareos-fd2
Description = "Client for setdebug testing."
Address = 192.168.101.1
Password = "fd_password" # password for FileDaemon
FD PORT = 42002
}

Client {
Name = bareos-fd3
Description = "Client for setdebug testing."
Address = 192.168.101.1
Password = "fd_password" # password for FileDaemon
FD PORT = 42003
}
@@ -0,0 +1,7 @@
Console {
Name = bareos-mon
Description = "Restricted console used by tray-monitor to get the status of the director."
Password = "mon_dir_password"
CommandACL = status, .status
JobACL = *all*
}
@@ -0,0 +1,27 @@
Director { # define myself
Name = bareos-dir
QueryFile = "/tmp/scripts/query.sql"
Maximum Concurrent Jobs = 10
Password = "dir_password" # Console password
Messages = Daemon
Auditing = yes

# Enable the Heartbeat if you experience connection losses
# (eg. because of your router or firewall configuration).
# Additionally the Heartbeat can be enabled in bareos-sd and bareos-fd.
#
# Heartbeat Interval = 1 min

# remove comment in next line to load dynamic backends from specified directory
Backend Directory = /tmp/core/src/cats

# remove comment from "Plugin Directory" to load plugins from specified directory.
# if "Plugin Names" is defined, only the specified plugins will be loaded,
# otherwise all director plugins (*-dir.so) from the "Plugin Directory".
#
# Plugin Directory = "/tmp/plugindir"
# Plugin Names = ""
Working Directory = "/tmp/tests/backup-bareos-test/working"
Pid Directory = "/tmp/piddir"
DirPort = 42001
}
@@ -0,0 +1,11 @@
FileSet {
Name = "Catalog"
Description = "Backup the catalog dump and Bareos configuration files."
Include {
Options {
signature = MD5
}
File = "/tmp/tests/backup-bareos-test/working/regress_backup_bareos_test.sql" # database dump
File = "/tmp/tests/backup-bareos-test/etc/bareos" # configuration
}
}
@@ -0,0 +1,31 @@
FileSet {
Name = "LinuxAll"
Description = "Backup all regular filesystems, determined by filesystem type."
Include {
Options {
Signature = MD5 # calculate md5 checksum per file
One FS = No # change into other filesystems
FS Type = btrfs
FS Type = ext2 # filesystems of given types will be backed up
FS Type = ext3 # others will be ignored
FS Type = ext4
FS Type = reiserfs
FS Type = jfs
FS Type = xfs
FS Type = zfs
}
File = /
}
# Things that usually have to be excluded
# You have to exclude /tmp/tests/backup-bareos-test/storage
# on your bareos server
Exclude {
File = /tmp/tests/backup-bareos-test/working
File = /tmp/tests/backup-bareos-test/storage
File = /proc
File = /tmp
File = /var/tmp
File = /.journal
File = /.fsck
}
}
@@ -0,0 +1,11 @@
FileSet {
Name = "SelfTest"
Description = "fileset just to backup some files for selftest"
Include {
Options {
Signature = MD5 # calculate md5 checksum per file
}
#File = "/tmp/sbin"
File=</tmp/tests/backup-bareos-test/tmp/file-list
}
}
@@ -0,0 +1,17 @@
FileSet {
Name = "Windows All Drives"
Enable VSS = yes
Include {
Options {
Signature = MD5
Drive Type = fixed
IgnoreCase = yes
WildFile = "[A-Z]:/pagefile.sys"
WildDir = "[A-Z]:/RECYCLER"
WildDir = "[A-Z]:/$RECYCLE.BIN"
WildDir = "[A-Z]:/System Volume Information"
Exclude = yes
}
File = /
}
}
@@ -0,0 +1,21 @@
Job {
Name = "BackupCatalog"
Description = "Backup the catalog database (after the nightly save)"
JobDefs = "DefaultJob"
Level = Full
FileSet="Catalog"
Schedule = "WeeklyCycleAfterBackup"

# This creates an ASCII copy of the catalog
# Arguments to make_catalog_backup.pl are:
# make_catalog_backup.pl <catalog-name>
RunBeforeJob = "/tmp/scripts/make_catalog_backup.pl MyCatalog"

# This deletes the copy of the catalog
RunAfterJob = "/tmp/scripts/delete_catalog_backup"

# This sends the bootstrap via mail for disaster recovery.
# Should be sent to another system, please change recipient accordingly
Write Bootstrap = "|/tmp/bin/bsmtp -h smtp_host -f \"\(Bareos\) \" -s \"Bootstrap for Job %j\" root@localhost" # (#01)
Priority = 11 # run after main backup
}
@@ -0,0 +1,11 @@
Job {
Name = "RestoreFiles"
Description = "Standard Restore template. Only one such job is needed for all standard Jobs/Clients/Storage ..."
Type = Restore
Client = bareos-fd
FileSet = "LinuxAll"
Storage = File
Pool = Incremental
Messages = Standard
Where = /tmp/tests/backup-bareos-test/tmp/tmp/bareos-restores
}
@@ -0,0 +1,27 @@
Job {
Name = "backup-bareos-fd"
JobDefs = "DefaultJob"
Client = "bareos-fd"
}

Job {
Name = "backup-bareos-fd-reconnect"
JobDefs = "DefaultJob"
Client = "bareos-fd"
ScheduleOnClientConnectInterval = 4h
}

Job {
Name = "backup-bareos-fd-reconnect-2"
JobDefs = "DefaultJob"
Client = "bareos-fd"
ScheduleOnClientConnectInterval = 4h
}

Job {
Name = "backup-bareos-fd2-reconnect"
JobDefs = "DefaultJob"
Client = "bareos-fd2"
ScheduleOnClientConnectInterval = 4h
}

@@ -0,0 +1,16 @@
JobDefs {
Name = "DefaultJob"
Type = Backup
Level = Incremental
Client = bareos-fd
FileSet = "SelfTest" # selftest fileset (#13)
Schedule = "WeeklyCycle"
Storage = File
Messages = Standard
Pool = Incremental
Priority = 10
Write Bootstrap = "/tmp/tests/backup-bareos-test/working/%c.bsr"
Full Backup Pool = Full # write Full Backups into "Full" Pool (#05)
Differential Backup Pool = Differential # write Diff Backups into "Differential" Pool (#08)
Incremental Backup Pool = Incremental # write Incr Backups into "Incremental" Pool (#11)
}
