Added "status subscription" and "rerun" tests.
We now have a regression test for the option to
check for the number of subscriptions.

Also, we check the different options available for
the "rerun" commmand.

Signed-off-by: Marco van Wieringen <marco.van.wieringen@bareos.com>
pstorz authored and Marco van Wieringen committed Jun 4, 2013
1 parent a2d2ce6 commit 1076c04
Showing 5 changed files with 454 additions and 0 deletions.
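
As a quick orientation, these are the console commands the new tests exercise (a minimal sketch: the rerun options below are taken verbatim from tests/rerun-test in this commit, while the exact name and output of the subscriptions status command are an assumption and not part of this diff):

  # rerun failed jobs, either by jobid or by time window (options used in tests/rerun-test)
  rerun jobid=1 yes
  rerun hours=1 yes
  rerun days=1 yes
  rerun since_jobid=1 yes

  # with "Subscriptions = 1" set in the Director resource (see the subscription-test
  # config below), the director can report subscription usage; command name assumed here
  status subscriptions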
2 changes: 2 additions & 0 deletions DartTestfile.txt.in
@@ -1,3 +1,5 @@
ADD_TEST(disk:rerun-test "@regressdir@/tests/rerun-test")
ADD_TEST(disk:subscription-test "@regressdir@/tests/subscription-test")
ADD_TEST(disk:acl-xattr-test "@regressdir@/tests/acl-xattr-test")
ADD_TEST(disk:action-on-purge-test "@regressdir@/tests/action-on-purge-test")
ADD_TEST(disk:accurate-test "@regressdir@/tests/accurate-test")
122 changes: 122 additions & 0 deletions configs/rerun-test/bareos-dir.conf.rerun-test.in
@@ -0,0 +1,122 @@
#
# Default Bareos Director Configuration file
#
# The only thing that MUST be changed is to add one or more
# file or directory names in the Include directive of the
# FileSet resource.
#
# For Bareos release 1.39.27 (24 October 2006) -- debian testing/unstable
#
# You might also want to change the default email address
# from root to your address. See the "mail" and "operator"
# directives in the Messages resource.
#

Director { # define myself
  Name = @hostname@-dir
  DIRPort = @dirport@ # where we listen for UA connections
  QueryFile = "@scriptdir@/query.sql"
  WorkingDirectory = "@working_dir@"
  PidDirectory = "@piddir@"
  SubSysDirectory = "@subsysdir@"
  Maximum Concurrent Jobs = 4
  Password = "pNvX1WiXnwv2C/F7E52LGvw6rKjbbPvu2kyuPa9pVaL3" # Console password
  Messages = Standard
}

JobDefs {
  Name = "DefaultJob"
  Type = Backup
  Level = Incremental
  Client = client
  FileSet = FS_TESTJOB
  Storage = File
  Messages = Standard
  Pool = Default
  Priority = 10
  Maximum Concurrent Jobs = 16
}

Job {
  Name = "BackupClient1"
  JobDefs = "DefaultJob"
  Client Run Before Job = "/bin/false" # let the job always fail
}


FileSet {
  Name = FS_TESTJOB
  Include {
    File=<@tmpdir@/file-list
  }
}

#dir: BeforeJob: run command "/bin/echo RunBeforeJob"
#fd: ClientRunBeforeJob: ClientRunBeforeJob
#fd: ClientAfterJob: run command "/bin/echo ClientRunAfterJob"
#dir: AfterJob: run command "/bin/echo RunAfterJob"


# Client (File Services) to backup
Client {
  Name = client
  Address = @hostname@
  FDPort = @fdport@
  Catalog = MyCatalog
  Password = "xevrjURYoCHhn26RaJoWbeWXEY/a3VqGKp/37tgWiuHc" # password for FileDaemon
  File Retention = 30d # 30 days
  Job Retention = 180d # six months
  AutoPrune = yes # Prune expired Jobs/Files
  Maximum Concurrent Jobs = 4
}

# second Client (File Services) to backup
Client {
  Name = @hostname@2-fd
  Address = @hostname@
  FDPort = @fdport@
  Catalog = MyCatalog
  Password = "xevrjURYoCHhn26RaJoWbeWXEY/a3VqGKp/37tgWiuHc" # password for FileDaemon
  File Retention = 30d # 30 days
  Job Retention = 180d # six months
  AutoPrune = yes # Prune expired Jobs/Files
  Maximum Concurrent Jobs = 4
}




# Definition of file storage device
Storage {
  Name = File
  Address = @hostname@ # N.B. Use a fully qualified name here
  SDPort = @sdport@
  Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9"
  Device = FileStorage
  Media Type = File
  Maximum Concurrent Jobs = 4
}

# Generic catalog service
Catalog {
  Name = MyCatalog
  @libdbi@
  dbname = @db_name@; user = @db_user@; password = "@db_password@"
}


Messages {
  Name = Standard
  console = all, !skipped, !saved
  catalog = all, !skipped
}

# Default pool definition
Pool {
  Name = Default
  Pool Type = Backup
  Recycle = yes # Bareos can automatically recycle Volumes
  AutoPrune = yes # Prune expired volumes
  Volume Retention = 365d # one year
}
123 changes: 123 additions & 0 deletions configs/subscription-test/bareos-dir.conf.subscription-test.in
@@ -0,0 +1,123 @@
#
# Default Bareos Director Configuration file
#
# The only thing that MUST be changed is to add one or more
# file or directory names in the Include directive of the
# FileSet resource.
#
# For Bareos release 1.39.27 (24 October 2006) -- debian testing/unstable
#
# You might also want to change the default email address
# from root to your address. See the "mail" and "operator"
# directives in the Messages resource.
#

Director { # define myself
  Name = @hostname@-dir
  DIRPort = @dirport@ # where we listen for UA connections
  QueryFile = "@scriptdir@/query.sql"
  WorkingDirectory = "@working_dir@"
  PidDirectory = "@piddir@"
  SubSysDirectory = "@subsysdir@"
  Maximum Concurrent Jobs = 4
  Password = "pNvX1WiXnwv2C/F7E52LGvw6rKjbbPvu2kyuPa9pVaL3" # Console password
  Messages = Standard
  Subscriptions = 1
}

JobDefs {
  Name = "DefaultJob"
  Type = Backup
  Level = Incremental
  Client = client
  FileSet = FS_TESTJOB
  Storage = File
  Messages = Standard
  Pool = Default
  Priority = 10
  Maximum Concurrent Jobs = 16
}

Job {
  Name = "BackupClient1"
  JobDefs = "DefaultJob"
}


FileSet {
  Name = FS_TESTJOB
  Include {
    File=<@tmpdir@/file-list
  }
}

#dir: BeforeJob: run command "/bin/echo RunBeforeJob"
#fd: ClientRunBeforeJob: ClientRunBeforeJob
#fd: ClientAfterJob: run command "/bin/echo ClientRunAfterJob"
#dir: AfterJob: run command "/bin/echo RunAfterJob"


# Client (File Services) to backup
Client {
  Name = client
  Address = @hostname@
  FDPort = @fdport@
  Catalog = MyCatalog
  Password = "xevrjURYoCHhn26RaJoWbeWXEY/a3VqGKp/37tgWiuHc" # password for FileDaemon
  File Retention = 30d # 30 days
  Job Retention = 180d # six months
  AutoPrune = yes # Prune expired Jobs/Files
  Maximum Concurrent Jobs = 4
}

# second Client (File Services) to backup
Client {
  Name = @hostname@2-fd
  Address = @hostname@
  FDPort = @fdport@
  Catalog = MyCatalog
  Password = "xevrjURYoCHhn26RaJoWbeWXEY/a3VqGKp/37tgWiuHc" # password for FileDaemon
  File Retention = 30d # 30 days
  Job Retention = 180d # six months
  AutoPrune = yes # Prune expired Jobs/Files
  Maximum Concurrent Jobs = 4
}




# Definition of file storage device
Storage {
  Name = File
  Address = @hostname@ # N.B. Use a fully qualified name here
  SDPort = @sdport@
  Password = "ccV3lVTsQRsdIUGyab0N4sMDavui2hOBkmpBU0aQKOr9"
  Device = FileStorage
  Media Type = File
  Maximum Concurrent Jobs = 4
}

# Generic catalog service
Catalog {
  Name = MyCatalog
  @libdbi@
  dbname = @db_name@; user = @db_user@; password = "@db_password@"
}


Messages {
  Name = Standard
  console = all, !skipped, !saved
  catalog = all, !skipped
}

# Default pool definition
Pool {
  Name = Default
  Pool Type = Backup
  Recycle = yes # Bareos can automatically recycle Volumes
  AutoPrune = yes # Prune expired volumes
  Volume Retention = 365d # one year
}
102 changes: 102 additions & 0 deletions tests/rerun-test
@@ -0,0 +1,102 @@
#!/bin/sh
#
#
TestName="rerun-test"
JobName=BackupClient1
. scripts/functions

scripts/cleanup
scripts/copy-test-confs



/bin/cp -f ${rconfigs}/${TestName}/bareos-dir.conf.${TestName} bin/bareos-dir.conf

echo "${cwd}/build/src/tests" >${cwd}/tmp/file-list

change_jobname $JobName
start_test

cat <<END_OF_DATA >${cwd}/tmp/bconcmds
@$out ${cwd}/tmp/log1.out
messages
label storage=File volume=TestVolume001
run job=$JobName yes
wait
messages
@$out ${cwd}/tmp/log2.out
rerun jobid=1 yes
wait
messages
@$out ${cwd}/tmp/log3.out
rerun hours=1 yes
wait
messages
@$out ${cwd}/tmp/log4.out
rerun days=1 yes
wait
messages
@$out ${cwd}/tmp/log5.out
rerun since_jobid=1 yes
wait
messages
quit
END_OF_DATA
run_bareos

#scripts/check_for_zombie_jobs storage=File client=client

echo "Backup 1 done"

stop_bareos

## check if the first job failed
grep "JobId 1: Fatal error: Client .* RunScript failed." ${tmp}/log1.out >/dev/null 2>&1
if test $? -eq 0; then
  print_debug "First job failed as intended."
else
  estat=1
fi

## check if the rerun of the first job worked and itself failed again
grep "JobId 2: Fatal error: Client .* RunScript failed." ${tmp}/log2.out >/dev/null 2>&1
if test $? -eq 0; then
  print_debug "Rerun of first job (JobId 2) failed as intended."
else
  estat=2
fi


## check if rerun of failed jobs in the last hour worked, we expect jobids 1 and 2 to be selected
grep "^1,2" ${tmp}/log3.out >/dev/null 2>&1
if test $? -eq 0; then
  print_debug "rerun last hour selected jobs 1 and 2."
else
  estat=3
fi

## check if rerun of failed jobs in the last day worked, we expect jobids 1, 2 and 3 to be selected
grep "^1,2,3" ${tmp}/log4.out >/dev/null 2>&1
if test $? -eq 0; then
  print_debug "rerun last day selected jobs 1,2,3 OK"
else
  estat=4
fi


## check if rerun since_jobid=1 worked, we expect jobids 1 through 8 to be selected
grep "^1,2,3,4,5,6,7,8" ${tmp}/log5.out >/dev/null 2>&1
if test $? -eq 0; then
  print_debug "rerun since_jobid=1 selected jobs 1-8 OK"
else
  estat=5
fi

end_test
