diff --git a/CHANGELOG.md b/CHANGELOG.md index 9779e79ec26..c7b9f3800f1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,6 +39,7 @@ and since Bareos version 20 this project adheres to [Semantic Versioning](https: - webui: improve localization process [PR #1429] - webui: add machine-readable copyright file [PR #1419] - webui: add config resource graph to analytics (experimental) [PR #1412] +- dird: keep copy and migration control/administrative jobs [PR #1421] ### Removed - remove no longer used pkglists [PR #1335] @@ -114,6 +115,7 @@ and since Bareos version 20 this project adheres to [Semantic Versioning](https: [PR #1414]: https://github.com/bareos/bareos/pull/1414 [PR #1415]: https://github.com/bareos/bareos/pull/1415 [PR #1419]: https://github.com/bareos/bareos/pull/1419 +[PR #1421]: https://github.com/bareos/bareos/pull/1421 [PR #1422]: https://github.com/bareos/bareos/pull/1422 [PR #1424]: https://github.com/bareos/bareos/pull/1424 [PR #1429]: https://github.com/bareos/bareos/pull/1429 diff --git a/core/src/dird/migrate.cc b/core/src/dird/migrate.cc index 1b34c100d98..92ca29f420a 100644 --- a/core/src/dird/migrate.cc +++ b/core/src/dird/migrate.cc @@ -3,7 +3,7 @@ Copyright (C) 2004-2012 Free Software Foundation Europe e.V. Copyright (C) 2011-2016 Planets Communications B.V. - Copyright (C) 2013-2022 Bareos GmbH & Co. KG + Copyright (C) 2013-2023 Bareos GmbH & Co. KG This program is Free Software; you can redistribute it and/or modify it under the terms of version three of the GNU Affero General Public @@ -246,10 +246,8 @@ static inline bool SetMigrationNextPool(JobControlRecord* jcr, PoolResource* pool; const char* storage_source; - /* - * Get the PoolId used with the original job. Then - * find the pool name from the database record. - */ + /* Get the PoolId used with the original job. Then + * find the pool name from the database record. */ pr.PoolId = jcr->dir_impl->jr.PoolId; if (!jcr->db->GetPoolRecord(jcr, &pr)) { Jmsg(jcr, M_FATAL, 0, _("Pool for JobId %s not in database. ERR=%s\n"), @@ -288,11 +286,9 @@ static inline bool SetMigrationNextPool(JobControlRecord* jcr, } } - /* - * If the original backup pool has a NextPool, make sure a + /* If the original backup pool has a NextPool, make sure a * record exists in the database. Note, in this case, we - * will be migrating from pool to pool->NextPool. - */ + * will be migrating from pool to pool->NextPool. */ if (jcr->dir_impl->res.next_pool) { jcr->dir_impl->jr.PoolId = GetOrCreatePoolRecord( jcr, jcr->dir_impl->res.next_pool->resource_name_); @@ -671,11 +667,9 @@ static bool regex_find_jobids(JobControlRecord* jcr, goto bail_out; /* skip regex match */ } - /* - * At this point, we have a list of items in item_chain + /* At this point, we have a list of items in item_chain * that have been matched by the regex, so now we need - * to look up their jobids. - */ + * to look up their jobids. */ ids->count = 0; foreach_dlist (item, item_chain) { Dmsg2(dbglevel, "Got %s: %s\n", type, item->item); @@ -696,9 +690,7 @@ static bool regex_find_jobids(JobControlRecord* jcr, bail_out: Dmsg2(dbglevel, "Count=%d Jobids=%s\n", ids->count, ids->list); - foreach_dlist (item, item_chain) { - free(item->item); - } + foreach_dlist (item, item_chain) { free(item->item); } delete item_chain; return ok; } @@ -1006,24 +998,20 @@ bool DoMigrationInit(JobControlRecord* jcr) return false; } - /* - * Note, at this point, pool is the pool for this job. + /* Note, at this point, pool is the pool for this job. 
+ /* Note, at this point, pool is the pool for this job. * We transfer it to rpool (read pool), and a bit later, * pool will be changed to point to the write pool, - * which comes from pool->NextPool. - */ + * which comes from pool->NextPool. */ jcr->dir_impl->res.rpool = jcr->dir_impl->res.pool; /* save read pool */ PmStrcpy(jcr->dir_impl->res.rpool_source, jcr->dir_impl->res.pool_source); Dmsg2(dbglevel, "Read pool=%s (From %s)\n", jcr->dir_impl->res.rpool->resource_name_, jcr->dir_impl->res.rpool_source); - /* - * See if this is a control job e.g. the one that selects the Jobs to Migrate + /* See if this is a control job e.g. the one that selects the Jobs to Migrate * or Copy or one of the worker Jobs that do the actual Migration or Copy. If * jcr->dir_impl_->MigrateJobId is set we know that its an actual Migration or - * Copy Job. - */ + * Copy Job. */ if (jcr->dir_impl->MigrateJobId != 0) { Dmsg1(dbglevel, "At Job start previous jobid=%u\n", jcr->dir_impl->MigrateJobId); @@ -1090,35 +1078,27 @@ bool DoMigrationInit(JobControlRecord* jcr) return false; } - /* - * Copy the actual level setting of the previous Job to this Job. + /* Copy the actual level setting of the previous Job to this Job. * This overrides the dummy backup level given to the migrate/copy Job and - * replaces it with the actual level the backup run at. - */ + * replaces it with the actual level the backup ran at. */ jcr->setJobLevel(prev_job->JobLevel); - /* - * If the current Job has no explicit client set use the client setting of - * the previous Job. - */ + /* If the current Job has no explicit client set, use the client setting of + * the previous Job. */ if (!jcr->dir_impl->res.client && prev_job->client) { jcr->dir_impl->res.client = prev_job->client; if (!jcr->client_name) { jcr->client_name = GetPoolMemory(PM_NAME); } PmStrcpy(jcr->client_name, jcr->dir_impl->res.client->resource_name_); } - /* - * If the current Job has no explicit fileset set use the client setting of - * the previous Job. - */ + /* If the current Job has no explicit fileset set, use the fileset setting of + * the previous Job. */ if (!jcr->dir_impl->res.fileset) { jcr->dir_impl->res.fileset = prev_job->fileset; } - /* - * See if spooling data is not enabled yet. If so turn on spooling if - * requested in job - */ + /* See if spooling data is not enabled yet. If so, turn on spooling if + * requested in the job. */ if (!jcr->dir_impl->spool_data) { jcr->dir_impl->spool_data = job->spool_data; } @@ -1129,8 +1109,7 @@ bool DoMigrationInit(JobControlRecord* jcr) memcpy(&mig_jcr->dir_impl->previous_jr, &jcr->dir_impl->previous_jr, sizeof(mig_jcr->dir_impl->previous_jr)); - /* - * Turn the mig_jcr into a "real" job that takes on the aspects of + /* Turn the mig_jcr into a "real" job that takes on the aspects of * the previous backup job "prev_job". We only don't want it to * ever send any messages to the database or mail messages when * we are doing a migrate or copy to a remote storage daemon. When * the remote state and it might want to send some captured state * info on tear down of the mig_jcr so we call SetupJob with the * suppress_output argument set to true (e.g. don't init messages - * and set the jcr suppress_output boolean to true). - */ + * and set the jcr suppress_output boolean to true).
*/ SetJcrDefaults(mig_jcr, prev_job); // Time value on this Job @@ -1148,10 +1126,8 @@ bool DoMigrationInit(JobControlRecord* jcr) // Don't check for duplicates on migration and copy jobs mig_jcr->dir_impl->IgnoreDuplicateJobChecking = true; - /* - * Copy some overwrites back from the Control Job to the migration and copy - * job. - */ + /* Copy some overwrites back from the Control Job to the migration and copy + * job. */ mig_jcr->dir_impl->spool_data = jcr->dir_impl->spool_data; mig_jcr->dir_impl->spool_size = jcr->dir_impl->spool_size; @@ -1182,21 +1158,17 @@ bool DoMigrationInit(JobControlRecord* jcr) mig_jcr->dir_impl->jr.PoolId = jcr->dir_impl->jr.PoolId; } - /* - * Get the storage that was used for the original Job. + /* Get the storage that was used for the original Job. * This only happens when the original pool used doesn't have an explicit - * storage. - */ + * storage. */ if (!jcr->dir_impl->res.read_storage_list) { CopyRstorage(jcr, prev_job->storage, _("previous Job")); } - /* - * See if the read and write storage is the same. + /* See if the read and write storage is the same. * When they are we do the migrate/copy over one SD connection * otherwise we open a connection to the reading SD and a second - * one to the writing SD. - */ + * one to the writing SD. */ jcr->dir_impl->remote_replicate = !IsSameStorageDaemon( jcr->dir_impl->res.read_storage, jcr->dir_impl->res.write_storage); @@ -1275,10 +1247,8 @@ static inline bool DoActualMigration(JobControlRecord* jcr) Jmsg(jcr, M_INFO, 0, _("Start %s JobId %s, Job=%s\n"), jcr->get_OperationName(), edit_uint64(jcr->JobId, ed1), jcr->Job); - /* - * See if the read storage is paired NDMP storage, if so setup - * the Job to use the native storage instead. - */ + /* See if the read storage is paired NDMP storage, if so setup + * the Job to use the native storage instead. */ if (HasPairedStorage(jcr)) { SetPairedStorage(jcr); } Dmsg2(dbglevel, "Read store=%s, write store=%s\n", @@ -1290,13 +1260,11 @@ static inline bool DoActualMigration(JobControlRecord* jcr) if (jcr->dir_impl->remote_replicate) { alist* write_storage_list; - /* - * See if we need to apply any bandwidth limiting. + /* See if we need to apply any bandwidth limiting. * We search the bandwidth limiting in the following way: * - Job bandwidth limiting * - Writing Storage Daemon bandwidth limiting - * - Reading Storage Daemon bandwidth limiting - */ + * - Reading Storage Daemon bandwidth limiting */ if (jcr->dir_impl->res.job->max_bandwidth > 0) { jcr->max_bandwidth = jcr->dir_impl->res.job->max_bandwidth; } else if (jcr->dir_impl->res.write_storage->max_bandwidth > 0) { @@ -1308,11 +1276,9 @@ static inline bool DoActualMigration(JobControlRecord* jcr) // Open a message channel connection to the Reading Storage daemon. Dmsg0(110, "Open connection with reading storage daemon\n"); - /* - * Clear the write_storage of the jcr and assign it to the mig_jcr so + /* Clear the write_storage of the jcr and assign it to the mig_jcr so * the jcr is connected to the reading storage daemon and the - * mig_jcr to the writing storage daemon. - */ + * mig_jcr to the writing storage daemon. 
- */ + * mig_jcr to the writing storage daemon.
*/ mig_jcr->dir_impl->res.write_storage = jcr->dir_impl->res.write_storage; jcr->dir_impl->res.write_storage = NULL; @@ -1379,16 +1345,14 @@ static inline bool DoActualMigration(JobControlRecord* jcr) Dmsg0(150, "Storage daemon connection OK\n"); } - /* - * We re-update the job start record so that the start + /* We re-update the job start record so that the start * time is set after the run before job. This avoids * that any files created by the run before job will * be saved twice. They will be backed up in the current * job, but not in the next one unless they are changed. * Without this, they will be backed up in this job and * in the next job run because in that case, their date - * is after the start of this run. - */ + * is after the start of this run. */ jcr->start_time = time(NULL); jcr->dir_impl->jr.StartTime = jcr->start_time; jcr->dir_impl->jr.JobTDate = jcr->start_time; @@ -1418,11 +1382,9 @@ static inline bool DoActualMigration(JobControlRecord* jcr) mig_jcr->dir_impl->jr.Name, (int)mig_jcr->dir_impl->jr.JobId, mig_jcr->dir_impl->jr.JobType, mig_jcr->dir_impl->jr.JobLevel); - /* - * If we are connected to two different SDs tell the writing one + /* If we are connected to two different SDs tell the writing one * to be ready to receive the data and tell the reading one - * to replicate to the other. - */ + * to replicate to the other. */ if (jcr->dir_impl->remote_replicate) { StorageResource* write_storage = mig_jcr->dir_impl->res.write_storage; StorageResource* read_storage = jcr->dir_impl->res.read_storage; @@ -1431,11 +1393,9 @@ static inline bool DoActualMigration(JobControlRecord* jcr) if (jcr->max_bandwidth > 0) { SendBwlimitToSd(jcr, jcr->Job); } - /* - * Start the job prior to starting the message thread below + /* Start the job prior to starting the message thread below * to avoid two threads from using the BareosSocket structure at - * the same time. - */ + * the same time. */ if (!mig_jcr->store_bsock->fsend("listen")) { goto bail_out; } if (!StartStorageDaemonMessageThread(mig_jcr)) { goto bail_out; } @@ -1460,11 +1420,9 @@ static inline bool DoActualMigration(JobControlRecord* jcr) if (received != OK_replicate) { goto bail_out; } } - /* - * Start the job prior to starting the message thread below + /* Start the job prior to starting the message thread below * to avoid two threads from using the BareosSocket structure at - * the same time. - */ + * the same time. */ if (!jcr->store_bsock->fsend("run")) { goto bail_out; } // Now start a Storage daemon message thread @@ -1473,12 +1431,10 @@ static inline bool DoActualMigration(JobControlRecord* jcr) jcr->setJobStatusWithPriorityCheck(JS_Running); mig_jcr->setJobStatusWithPriorityCheck(JS_Running); - /* - * Pickup Job termination data + /* Pickup Job termination data * Note, the SD stores in jcr->JobFiles/ReadBytes/JobBytes/JobErrors or * mig_jcr->JobFiles/ReadBytes/JobBytes/JobErrors when replicating to - * a remote storage daemon. - */ + * a remote storage daemon. */ if (jcr->dir_impl->remote_replicate) { WaitForStorageDaemonTermination(jcr); WaitForStorageDaemonTermination(mig_jcr); @@ -1502,14 +1458,12 @@ static inline bool DoActualMigration(JobControlRecord* jcr) = jcr->dir_impl->res.write_storage_list; jcr->dir_impl->res.write_storage_list = write_storage_list; - /* - * Undo the clear of the write_storage in the jcr and assign the mig_jcr + /* Undo the clear of the write_storage in the jcr and assign the mig_jcr * write_storage back to the jcr. 
This is an undo of the clearing we did * earlier as we want the jcr connected to the reading storage daemon and * the mig_jcr to the writing jcr. By clearing the write_storage of the jcr * the ConnectToStorageDaemon function will do the right thing e.g. connect - * the jcrs in the way we want them to. - */ + * the jcrs in the way we want them to. */ jcr->dir_impl->res.write_storage = mig_jcr->dir_impl->res.write_storage; mig_jcr->dir_impl->res.write_storage = NULL; } @@ -1542,11 +1496,9 @@ static inline bool DoMigrationSelection(JobControlRecord* jcr) bool DoMigration(JobControlRecord* jcr) { - /* - * See if this is a control job e.g. the one that selects the Jobs to Migrate + /* See if this is a control job e.g. the one that selects the Jobs to Migrate * or Copy or one of the worker Jobs that do the actual Migration or Copy. If - * jcr->dir_impl_->MigrateJobId is unset we know that its the control job. - */ + * jcr->dir_impl_->MigrateJobId is unset we know that its the control job. */ if (jcr->dir_impl->MigrateJobId == 0) { return DoMigrationSelection(jcr); } else { @@ -1691,15 +1643,11 @@ void MigrationCleanup(JobControlRecord* jcr, int TermCode) PoolMem query(PM_MESSAGE); Dmsg2(100, "Enter migrate_cleanup %d %c\n", TermCode, TermCode); + UpdateJobEnd(jcr, TermCode); - - /* - * Check if we actually did something. - * mig_jcr is jcr of the newly migrated job. - */ + /* Check if we actually did something. + * mig_jcr is jcr of the newly migrated job. */ if (mig_jcr) { - UpdateJobEnd(jcr, TermCode); - char old_jobid[50], new_jobid[50]; edit_uint64(jcr->dir_impl->previous_jr.JobId, old_jobid); @@ -1711,11 +1659,9 @@ void MigrationCleanup(JobControlRecord* jcr, int TermCode) jcr->JobId); jcr->db->SqlQuery(query.c_str()); - /* - * See if we used a remote SD if so the mig_jcr contains + /* See if we used a remote SD if so the mig_jcr contains * the jobfiles and jobbytes and the new volsessionid - * and volsessiontime as the writing SD generates this info. - */ + * and volsessiontime as the writing SD generates this info. */ if (jcr->dir_impl->remote_replicate) { mig_jcr->JobFiles = jcr->JobFiles = mig_jcr->dir_impl->SDJobFiles; mig_jcr->JobBytes = jcr->JobBytes = mig_jcr->dir_impl->SDJobBytes; @@ -1749,13 +1695,11 @@ void MigrationCleanup(JobControlRecord* jcr, int TermCode) switch (jcr->getJobType()) { case JT_MIGRATE: - /* - * If we terminated a Migration Job successfully we should: + /* If we terminated a Migration Job successfully we should: * - Mark the previous job as migrated * - Move any Log records to the new JobId * - Move any MetaData of a NDMP backup - * - Purge the File records from the previous job - */ + * - Purge the File records from the previous job */ Mmsg(query, "UPDATE Job SET Type='%c' WHERE JobId=%s", (char)JT_MIGRATED_JOB, old_jobid); mig_jcr->db->SqlQuery(query.c_str()); @@ -1765,13 +1709,11 @@ void MigrationCleanup(JobControlRecord* jcr, int TermCode) old_jobid); mig_jcr->db->SqlQuery(query.c_str()); - /* - * If we just migrated a NDMP job, we need to move the file MetaData + /* If we just migrated a NDMP job, we need to move the file MetaData * to the new job. The file MetaData is stored as hardlinks to the * NDMP archive itself. And as we only clone the actual data in the * storage daemon we need to add data normally send to the director - * via the FHDB interface here. - */ + * via the FHDB interface here. 
- */ + * via the FHDB interface here.
*/ switch (jcr->dir_impl->res.client->Protocol) { case APT_NDMPV2: case APT_NDMPV3: @@ -1796,25 +1738,21 @@ void MigrationCleanup(JobControlRecord* jcr, int TermCode) FreeUaContext(ua); break; case JT_COPY: - /* - * If we terminated a Copy Job successfully we should: + /* If we terminated a Copy Job successfully we should: * - Copy any Log records to the new JobId * - Copy any MetaData of a NDMP backup - * - Set type="Job Copy" for the new job - */ + * - Set type="Job Copy" for the new job */ Mmsg(query, "INSERT INTO Log (JobId, Time, LogText ) " "SELECT %s, Time, LogText FROM Log WHERE JobId=%s", new_jobid, old_jobid); mig_jcr->db->SqlQuery(query.c_str()); - /* - * If we just copied a NDMP job, we need to copy the file MetaData + /* If we just copied a NDMP job, we need to copy the file MetaData * to the new job. The file MetaData is stored as hardlinks to the * NDMP archive itself. And as we only clone the actual data in the * storage daemon we need to add data normally send to the director - * via the FHDB interface here. - */ + * via the FHDB interface here. */ switch (jcr->dir_impl->res.client->Protocol) { case APT_NDMPV2: case APT_NDMPV3: @@ -1847,12 +1785,10 @@ void MigrationCleanup(JobControlRecord* jcr, int TermCode) if (!mig_jcr->db->GetJobVolumeNames(mig_jcr, mig_jcr->dir_impl->jr.JobId, mig_jcr->VolumeName)) { - /* - * Note, if the job has failed, most likely it did not write any + /* Note, if the job has failed, most likely it did not write any * tape, so suppress this "error" message since in that case * it is normal. Or look at it the other way, only for a - * normal exit should we complain about this error. - */ + * normal exit should we complain about this error. */ if (jcr->IsTerminatedOk() && jcr->dir_impl->jr.JobBytes) { Jmsg(jcr, M_ERROR, 0, "%s", mig_jcr->db->strerror()); } @@ -1885,12 +1821,10 @@ void MigrationCleanup(JobControlRecord* jcr, int TermCode) case JS_FatalError: case JS_ErrorTerminated: case JS_Canceled: - /* - * We catch any error here as the close of the SD sessions is mandatory + /* We catch any error here as the close of the SD sessions is mandatory * for each failure path. The termination message and the message type * can be different so that is why we do a second switch inside the - * switch on the JobStatus. - */ + * switch on the JobStatus. */ switch (jcr->getJobStatus()) { case JS_Canceled: TermMsg = _("%s Canceled"); @@ -1922,9 +1856,6 @@ void MigrationCleanup(JobControlRecord* jcr, int TermCode) break; } } else if (jcr->dir_impl->HasSelectedJobs) { - Mmsg(query, "DELETE FROM job WHERE JobId=%d", jcr->JobId); - jcr->db->SqlQuery(query.c_str()); - switch (jcr->getJobStatus()) { case JS_Terminated: TermMsg = _("%s OK"); @@ -1945,8 +1876,6 @@ void MigrationCleanup(JobControlRecord* jcr, int TermCode) break; } } else { - Mmsg(query, "DELETE FROM job WHERE JobId=%d", jcr->JobId); - jcr->db->SqlQuery(query.c_str()); TermMsg = _("%s -- no files to %s"); } diff --git a/docs/manuals/source/TasksAndConcepts/MigrationAndCopy.rst b/docs/manuals/source/TasksAndConcepts/MigrationAndCopy.rst index 390f2f65d71..8391fe1cdd3 100644 --- a/docs/manuals/source/TasksAndConcepts/MigrationAndCopy.rst +++ b/docs/manuals/source/TasksAndConcepts/MigrationAndCopy.rst @@ -39,9 +39,9 @@ A migration job can be started manually or from a Schedule, like a backup job. 
I Normally four jobs are involved during a migration/copy: -- The migration/copy **control job**: This job checks for jobs that need to be copied/migrated and starts a copy/migrate worker job for each of these jobs. The migration/copy control job gets removed from the catalog database at the end of the operation. +- The migration/copy **control job**: This job checks for jobs that need to be copied/migrated and starts a copy/migrate worker job for each of these jobs. -- The migration/copy **worker jobs**: They copy the data of one original job to the resulting destination job. The worker jobs are removed from the database when the destination job (the job that is the result of the copying) is pruned from the catalog database. +- The migration/copy **worker jobs**: They copy the data of one original job to the resulting destination job. The worker jobs are removed from the database when the destination job (the job that is the result of the copying) is pruned from the catalog database, or, in the case of a copy, when the new copy is upgraded to become a backup. - The **previous (original) Backup job** (*already run and being copied*): The File records of this job are purged when the migration/copy job terminates successfully. The data remain on the volume until it is recycled. diff --git a/systemtests/tests/copy-migrate/testrunner-02-copy b/systemtests/tests/copy-migrate/testrunner-02-copy index 70b61e75eee..95c5cf04798 100755 --- a/systemtests/tests/copy-migrate/testrunner-02-copy +++ b/systemtests/tests/copy-migrate/testrunner-02-copy @@ -11,44 +11,59 @@ TestName=02-copy #shellcheck source=../scripts/functions . "${rscripts}"/functions -log="$tmp/$TestName.log" +copy_log="$tmp/copy_log.out" query_results="$tmp/query_results.out" -check_log() { - if ! grep -q "$@" "$log"; then - echo "'$*' not found in $log" >&2 - estat=1 - fi -} - start_test cat <<END_OF_DATA >"$tmp/bconcmds" @$out /dev/null messages -@$out $log +@$out $copy_log label volume=TestCopyVolume001 storage=File2 pool=FullCopy run copy yes wait messages @$out $query_results sqlquery -SELECT count(jobid) FROM job WHERE type='c'; -SELECT count(jobid) FROM job WHERE type='C'; +SELECT type FROM job WHERE jobid=4; +. +quit +END_OF_DATA + + +run_bconsole "$tmp/bconcmds" + +expect_grep "Termination:.*Copying OK" \ "$copy_log" \ "Copy job did not terminate successfully." + +expect_grep "|.*C.*|" \ "$query_results" \ "Copy job does not have the expected type." +cat <<END_OF_DATA >"$tmp/bconcmds" +@$out $query_results +prune volume=TestVolume001 yes +sqlquery +SELECT count(jobid) FROM job WHERE type='c'; +SELECT type FROM job WHERE jobid=4; +. +messages quit END_OF_DATA run_bconsole "$tmp/bconcmds" -check_for_zombie_jobs storage=File -check_log -F 'Catalog record for Volume "TestCopyVolume001", Slot 0 successfully created.' -check_log 'Termination:.*Copying OK' +expect_grep "| 1 |" \ "$query_results" \ "Copy jobs were not deleted appropriately after backup volume pruning." -if [[ $(grep -c "| 1 |" "$query_results") -ne "2" ]]; then - echo "The expected amount of copy jobs in the catalog is not met." - estat=1 -fi +expect_grep "|.*B.*|" \ "$query_results" \ "Copy job was not upgraded to backup after original backup was pruned."
+ +check_for_zombie_jobs storage=File end_test diff --git a/systemtests/tests/copy-migrate/testrunner-03-impossible-copy b/systemtests/tests/copy-migrate/testrunner-03-impossible-copy index bae1fc2627c..243efb24e8d 100755 --- a/systemtests/tests/copy-migrate/testrunner-03-impossible-copy +++ b/systemtests/tests/copy-migrate/testrunner-03-impossible-copy @@ -32,6 +32,9 @@ cat <<END_OF_DATA >"$tmp/bconcmds" @$out /dev/null messages @$out $log +run job=backup-bareos-fd level=Full yes +wait +update volume=TestVolume001 volstatus=Used sqlquery UPDATE Media SET mediatype = 'NoSuchType' WHERE volumename = 'TestVolume001'; diff --git a/systemtests/tests/copy-migrate/testrunner-05-migrate b/systemtests/tests/copy-migrate/testrunner-05-migrate index 63306a7a6d5..7dacce75a85 100755 --- a/systemtests/tests/copy-migrate/testrunner-05-migrate +++ b/systemtests/tests/copy-migrate/testrunner-05-migrate @@ -35,15 +35,14 @@ wait messages @$out $query_results sqlquery -SELECT count(jobid) FROM job WHERE type='g'; -SELECT count(jobid) FROM job WHERE type='M'; - +SELECT type FROM job WHERE jobid IN (11,12,13); +. quit END_OF_DATA run_bconsole -expect_grep "The following 3 JobIds were chosen to be migrated: 10,11,12" \ +expect_grep "The following 3 JobIds were chosen to be migrated: 11,12,13" \ "$log" \ "Expected jobs to be migrated do not match." @@ -53,8 +52,8 @@ if [[ $(grep -c "Termination:.*Migration OK" "$log") -ne "4" ]]; then estat=1 fi -if [[ $(grep -c "| 3 |" "$query_results") -ne "2" ]]; then - echo "The necessary amount of migration related jobs are not available in the catalog." +if [[ $(grep -c "|.*M.*|" "$query_results") -ne "3" ]]; then + echo "Former backup jobs were not successfully converted to 'Migrated Jobs'." estat=2 fi @@ -62,39 +61,24 @@ rm -f $query_results cat <<END_OF_DATA >"$tmp/bconcmds" @$out $query_results -prune volume=TestVolume002 yes sqlquery -SELECT count(jobid) FROM job WHERE type='M'; - -quit -END_OF_DATA - -run_bconsole - -if ! grep "| 0 |" "$query_results" ; then - echo "Migrated jobs were not deleted appropriately after volume pruning." - estat=3 -fi - -rm -f $query_results - -cat <<END_OF_DATA >"$tmp/bconcmds" -@$out $query_results +SELECT count(jobid) FROM job WHERE type='g'; +. update volume=$migration_volume volstatus=Used prune volume=$migration_volume yes sqlquery SELECT count(jobid) FROM job WHERE type='g'; - +. quit END_OF_DATA run_bconsole +expect_grep "| 4 |" \ "$query_results" \ "Expected number of migration admin and worker jobs not met." -if ! grep "| 0 |" "$query_results" ; then - echo "Migration jobs were not deleted appropriately after volume pruning." - estat=4 -fi - - +expect_grep "| 1 |" \ "$query_results" \ "Migration jobs were not deleted appropriately after pruning the migration volume." end_test
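For a quick manual check of the behavior this change introduces, the following is a minimal sketch written in the style of the testrunner scripts above. It is an illustration only: it assumes the same systemtest environment (the run_bconsole helper, the $tmp convention, and the copy job configured for these tests), and the output file name control_jobs.out is made up for this example.

cat <<END_OF_DATA >"$tmp/bconcmds"
@$out $tmp/control_jobs.out
run copy yes
wait
sqlquery
SELECT jobid, type FROM job WHERE type IN ('c','g');
.
quit
END_OF_DATA

run_bconsole "$tmp/bconcmds"

# Before this change, MigrationCleanup() deleted the control job's own
# catalog record ("DELETE FROM job WHERE JobId=..."), so the query above
# returned no rows once the operation had ended. With those DELETE
# statements removed, the copy ('c') and migration ('g') control jobs
# remain in the Job table: testrunner-05 expects four 'g' rows right
# after the migration (one control job plus three workers) and a single
# surviving 'g' row after the migration volume has been pruned.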