From 53ca1154b9b40fefc88e54f219d7603d22e5fcc6 Mon Sep 17 00:00:00 2001 From: Marco van Wieringen Date: Wed, 28 Jun 2017 17:41:20 +0200 Subject: [PATCH 01/46] gfapi: Allow to use non-accurate backups with glusterfind With the introduction of glusterfind support we explicitly assumed that accurate mode was always used by the Job doing the backup. This patch allows you to run non accurate backups although best practice is to use accurate mode. --- src/plugins/filed/gfapi-fd.c | 57 +++++++++++++++++++++++------------- 1 file changed, 37 insertions(+), 20 deletions(-) diff --git a/src/plugins/filed/gfapi-fd.c b/src/plugins/filed/gfapi-fd.c index ae23622249d..bb39166b0a1 100644 --- a/src/plugins/filed/gfapi-fd.c +++ b/src/plugins/filed/gfapi-fd.c @@ -135,6 +135,7 @@ struct plugin_ctx { char *next_xattr_name; /* Next xattr name to process */ bool crawl_fs; /* Use local fs crawler to find files to backup */ char *gf_file_list; /* File with list of files generated by glusterfind to backup */ + bool is_accurate; /* Backup has accurate option enabled */ POOLMEM *cwd; /* Current Working Directory */ POOLMEM *next_filename; /* Next filename to save */ POOLMEM *link_target; /* Target symlink points to */ @@ -653,8 +654,10 @@ static bRC get_next_file_to_backup(bpContext *ctx) continue; } *bp++ = '\0'; - urllib_unquote_plus(p_ctx->next_filename); - bfuncs->ClearSeenBitmap(ctx, false, p_ctx->next_filename); + if (p_ctx->is_accurate) { + urllib_unquote_plus(p_ctx->next_filename); + bfuncs->ClearSeenBitmap(ctx, false, p_ctx->next_filename); + } bstrinlinecpy(p_ctx->next_filename, bp); urllib_unquote_plus(p_ctx->next_filename); break; @@ -662,9 +665,11 @@ static bRC get_next_file_to_backup(bpContext *ctx) /* * DELETE means we clear the seen bitmap for this file and continue. */ - bstrinlinecpy(p_ctx->next_filename, p_ctx->next_filename + gf_mapping->compare_size); - urllib_unquote_plus(p_ctx->next_filename); - bfuncs->ClearSeenBitmap(ctx, false, p_ctx->next_filename); + if (p_ctx->is_accurate) { + bstrinlinecpy(p_ctx->next_filename, p_ctx->next_filename + gf_mapping->compare_size); + urllib_unquote_plus(p_ctx->next_filename); + bfuncs->ClearSeenBitmap(ctx, false, p_ctx->next_filename); + } continue; default: Jmsg(ctx, M_ERROR, "Unrecognized glusterfind entry %s\n", p_ctx->next_filename); @@ -1320,7 +1325,7 @@ static inline bool parse_gfapi_devicename(char *devicename, /* * Validate URI. */ - if (!bp || !*bp == '/') { + if (!bp || *(bp + 1) != '/') { goto bail_out; } @@ -1517,6 +1522,16 @@ static bRC setup_backup(bpContext *ctx, void *value) * See if we use an external list with files to backup or should crawl the filesystem ourself. */ if (p_ctx->gf_file_list) { + int accurate; + + /* + * Get the setting for accurate for this Job. + */ + bfuncs->getBareosValue(ctx, bVarAccurate, (void *)&accurate); + if (accurate) { + p_ctx->is_accurate = true; + } + p_ctx->crawl_fs = false; if ((p_ctx->file_list_handle = fopen(p_ctx->gf_file_list, "r")) == (FILE *)NULL) { Jmsg(ctx, M_FATAL, "Failed to open %s for reading files to backup\n", p_ctx->gf_file_list); @@ -1524,21 +1539,23 @@ static bRC setup_backup(bpContext *ctx, void *value) goto bail_out; } - /* - * Mark all files as seen from the previous backup when this is a incremental or differential backup. - * The entries we get from glusterfind are only the changes since that earlier backup. 
-       */
-      switch (p_ctx->backup_level) {
-      case L_INCREMENTAL:
-      case L_DIFFERENTIAL:
-         if (bfuncs->SetSeenBitmap(ctx, true, NULL) != bRC_OK) {
-            Jmsg(ctx, M_FATAL, "Failed to enable all entries in Seen bitmap, not an accurate backup ?\n");
-            Dmsg(ctx, dbglvl, "Failed to enable all entries in Seen bitmap, not an accurate backup ?\n");
-            goto bail_out;
+      if (p_ctx->is_accurate) {
+         /*
+          * Mark all files as seen from the previous backup when this is a incremental or differential backup.
+          * The entries we get from glusterfind are only the changes since that earlier backup.
+          */
+         switch (p_ctx->backup_level) {
+         case L_INCREMENTAL:
+         case L_DIFFERENTIAL:
+            if (bfuncs->SetSeenBitmap(ctx, true, NULL) != bRC_OK) {
+               Jmsg(ctx, M_FATAL, "Failed to enable all entries in Seen bitmap, not an accurate backup ?\n");
+               Dmsg(ctx, dbglvl, "Failed to enable all entries in Seen bitmap, not an accurate backup ?\n");
+               goto bail_out;
+            }
+            break;
+         default:
+            break;
          }
-         break;
-      default:
-         break;
       }

       /*

From bc43e15ba518d24d0782eb55c3b4858c67455160 Mon Sep 17 00:00:00 2001
From: Marco van Wieringen
Date: Wed, 28 Jun 2017 17:41:26 +0200
Subject: [PATCH 02/46] gfapi: Fix backups with empty glusterfind filelist.

The original plugin interface always expects one or more files to be
backed up. With glusterfind it can happen that no files changed since
the last backup, which means setup_backup() returns bRC_OK instead of
bRC_More, which normally means there are more files to backup. (To
determine which file needs to be processed next we use
get_next_file_to_backup(), which is also used in the endBackupFile()
plugin function.)

The problem is that returning bRC_OK has a different meaning when used
in bEventBackupCommand, e.g. at the start of a backup, than it has in
endBackupFile(). There is a nice and simple workaround: we know that
get_next_file_to_backup() normally returns bRC_More and only returns
bRC_OK on an empty list. The way the initialization works, we can
return anything other than bRC_OK (bRC_Skip in our case), which means
the backup loop never gets executed.

As we now never enter the backup loop, we are also no longer bitten by
the side effect you see when backing up an empty file list, e.g.
failing backups due to the fact that the backup expects one or more
valid so-called savepackets. When the filelist is empty, a savepacket
with sp_type zero is returned and that leads to the following error:
'no type in startBackupFile packet.' So the problem manifested itself
as a very different error than the actual underlying problem. Corner
case closed.
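
To make the return-code convention described above concrete, here is a
minimal standalone sketch of a setup step that reports a skip result for an
empty external file list so the per-file backup loop is never entered. This
is not the actual fd_plugins.c or gfapi-fd.c code; the RC_* values,
next_entry() and setup() are illustrative stand-ins for
bRC_More/bRC_OK/bRC_Skip, get_next_file_to_backup() and setup_backup().

#include <stdio.h>

/* Illustrative return codes mirroring the bRC_OK/bRC_More/bRC_Skip idea. */
typedef enum { RC_OK, RC_MORE, RC_SKIP, RC_ERROR } rc_t;

/* Hypothetical stand-in for get_next_file_to_backup(): RC_MORE while entries
 * remain in the external file list, RC_OK once the list is exhausted. */
static rc_t next_entry(int *remaining)
{
   return (*remaining)-- > 0 ? RC_MORE : RC_OK;
}

/* Hypothetical stand-in for setup_backup(): an empty list becomes RC_SKIP so
 * the caller never enters the loop and never produces an empty save packet. */
static rc_t setup(int *remaining)
{
   switch (next_entry(remaining)) {
   case RC_MORE:
      return RC_OK;     /* First file queued, caller should run the loop. */
   case RC_OK:
      return RC_SKIP;   /* Empty file list, caller must skip the loop. */
   default:
      return RC_ERROR;
   }
}

int main(void)
{
   int files = 0;       /* Simulate an empty glusterfind file list. */

   if (setup(&files) == RC_SKIP) {
      printf("empty filelist: backup loop skipped, no save packet produced\n");
   }
   return 0;
}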
--- src/filed/fd_plugins.c | 6 ++---- src/plugins/filed/gfapi-fd.c | 24 ++++++++++++++++++++---- 2 files changed, 22 insertions(+), 8 deletions(-) diff --git a/src/filed/fd_plugins.c b/src/filed/fd_plugins.c index 347fb9f28f3..caf5dc6b2ba 100644 --- a/src/filed/fd_plugins.c +++ b/src/filed/fd_plugins.c @@ -716,8 +716,7 @@ int plugin_save(JCR *jcr, FF_PKT *ff_pkt, bool top_level) } if (sp.type == 0) { - Jmsg1(jcr, M_FATAL, 0, _("Command plugin \"%s\": no type in startBackupFile packet.\n"), - cmd); + Jmsg1(jcr, M_FATAL, 0, _("Command plugin \"%s\": no type in startBackupFile packet.\n"), cmd); goto bail_out; } @@ -943,8 +942,7 @@ int plugin_estimate(JCR *jcr, FF_PKT *ff_pkt, bool top_level) } if (sp.type == 0) { - Jmsg1(jcr, M_FATAL, 0, _("Command plugin \"%s\": no type in startBackupFile packet.\n"), - cmd); + Jmsg1(jcr, M_FATAL, 0, _("Command plugin \"%s\": no type in startBackupFile packet.\n"), cmd); goto bail_out; } diff --git a/src/plugins/filed/gfapi-fd.c b/src/plugins/filed/gfapi-fd.c index bb39166b0a1..42bf50e0394 100644 --- a/src/plugins/filed/gfapi-fd.c +++ b/src/plugins/filed/gfapi-fd.c @@ -1508,6 +1508,7 @@ static bRC connect_to_gluster(bpContext *ctx, bool is_backup) */ static bRC setup_backup(bpContext *ctx, void *value) { + bRC retval = bRC_Error; plugin_ctx *p_ctx = (plugin_ctx *)ctx->pContext; if (!p_ctx || !value) { @@ -1563,9 +1564,24 @@ static bRC setup_backup(bpContext *ctx, void *value) * As we need to get it from the gfflilelist we use get_next_file_to_backup() * to do the setup for us it retrieves the entry and does a setup of filetype etc. */ - if (get_next_file_to_backup(ctx) == bRC_Error) { + switch (get_next_file_to_backup(ctx)) { + case bRC_OK: + /* + * get_next_file_to_backup() normally returns bRC_More to indicate that there are + * more files to backup. But when using glusterfind we use an external filelist which + * could be empty in that special case we get bRC_OK back from get_next_file_to_backup() + * and then only in setup_backup() we return bRC_Skip which will skip processing of any + * more files to backup. + */ + retval = bRC_Skip; + break; + case bRC_Error: Jmsg(ctx, M_FATAL, "Failed to get first file to backup\n"); Dmsg(ctx, dbglvl, "Failed to get first file to backup\n"); + goto bail_out; + default: + retval = bRC_OK; + break; } } else { p_ctx->crawl_fs = true; @@ -1605,12 +1621,12 @@ static bRC setup_backup(bpContext *ctx, void *value) } else { pm_strcpy(p_ctx->next_filename, "/"); } - } - return bRC_OK; + retval = bRC_OK; + } bail_out: - return bRC_Error; + return retval; } /* From fde6778b1b755e1e2ddbed2b575aaa36d7114a7a Mon Sep 17 00:00:00 2001 From: Marco van Wieringen Date: Wed, 28 Jun 2017 17:41:32 +0200 Subject: [PATCH 03/46] gfapi: Explicitly close glfs fd on IO-open. Close the glfs fd handle on an IO_OPEN when it was not closed for whatever reason. --- src/plugins/filed/gfapi-fd.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/plugins/filed/gfapi-fd.c b/src/plugins/filed/gfapi-fd.c index 42bf50e0394..bdc94855069 100644 --- a/src/plugins/filed/gfapi-fd.c +++ b/src/plugins/filed/gfapi-fd.c @@ -1664,6 +1664,13 @@ static bRC pluginIO(bpContext *ctx, struct io_pkt *io) switch(io->func) { case IO_OPEN: + /* + * Close the gfd when it was not closed before. 
+ */ + if (p_ctx->gfd) { + glfs_close(p_ctx->gfd); + } + if (io->flags & (O_CREAT | O_WRONLY)) { p_ctx->gfd = glfs_creat(p_ctx->glfs, io->fname, io->flags, io->mode); } else { From a5aa26180754ec8df5acc82eb077eb501617500c Mon Sep 17 00:00:00 2001 From: Marco van Wieringen Date: Wed, 28 Jun 2017 17:41:40 +0200 Subject: [PATCH 04/46] gfapi: Don't reinitialize the connection to gluster The plugin event bEventRestoreCommand is fired for each file we are supposed to restore, currently we don't check if we already have setup the connection to gluster using the function connect_to_gluster() and as such we leak a set of socket connections for each file restored. This is bad so lets properly check if things are already setup correctly. For a backup we do the same e.g. check if p_ctx->glfs is not already initialized. --- src/plugins/filed/gfapi-fd.c | 56 +++++++++++++++++++++++++++++++++--- 1 file changed, 52 insertions(+), 4 deletions(-) diff --git a/src/plugins/filed/gfapi-fd.c b/src/plugins/filed/gfapi-fd.c index bdc94855069..13c53816a58 100644 --- a/src/plugins/filed/gfapi-fd.c +++ b/src/plugins/filed/gfapi-fd.c @@ -1,7 +1,7 @@ /* BAREOS® - Backup Archiving REcovery Open Sourced - Copyright (C) 2014-2016 Planets Communications B.V. + Copyright (C) 2014-2017 Planets Communications B.V. Copyright (C) 2014-2016 Bareos GmbH & Co. KG This program is Free Software; you can redistribute it and/or @@ -121,6 +121,7 @@ struct plugin_ctx { int32_t backup_level; /* Backup level e.g. Full/Differential/Incremental */ utime_t since; /* Since time for Differential/Incremental */ char *plugin_options; /* Options passed to plugin */ + char *plugin_definition; /* Previous plugin definition passed to plugin */ char *gfapi_volume_spec; /* Unparsed Gluster volume specification */ char *transport; /* Gluster transport protocol to management server */ char *servername; /* Gluster management server */ @@ -439,6 +440,10 @@ static bRC freePlugin(bpContext *ctx) free(p_ctx->gfapi_volume_spec); } + if (p_ctx->plugin_definition) { + free(p_ctx->plugin_definition); + } + if (p_ctx->plugin_options) { free(p_ctx->plugin_options); } @@ -1060,6 +1065,25 @@ static bRC parse_plugin_definition(bpContext *ctx, void *value) return bRC_Error; } + /* + * See if we already got some plugin definition before and its exactly the same. + */ + if (p_ctx->plugin_definition) { + if (bstrcmp(p_ctx->plugin_definition, (char *)value)) { + return bRC_OK; + } + + free(p_ctx->plugin_definition); + } + + /* + * Keep track of the last processed plugin definition. + */ + p_ctx->plugin_definition = bstrdup((char *)value); + + /* + * Keep overrides passed in via pluginoptions. + */ keep_existing = (p_ctx->plugin_options) ? true : false; /* @@ -1462,6 +1486,14 @@ static bRC connect_to_gluster(bpContext *ctx, bool is_backup) return bRC_Error; } + /* + * If we get called and we already have a handle to gfapi we should tear it down. + */ + if (p_ctx->glfs) { + glfs_fini(p_ctx->glfs); + p_ctx->glfs = NULL; + } + p_ctx->glfs = glfs_new(p_ctx->volumename); if (!p_ctx->glfs) { goto bail_out; @@ -1515,6 +1547,16 @@ static bRC setup_backup(bpContext *ctx, void *value) goto bail_out; } + /* + * If we are already having a handle to gfapi and we are getting the + * same plugin definition there is no need to tear down the whole stuff and + * setup exactly the same. 
+ */ + if (p_ctx->glfs && + bstrcmp((char *)value, p_ctx->plugin_definition)) { + return bRC_OK; + } + if (connect_to_gluster(ctx, true) != bRC_OK) { goto bail_out; } @@ -1640,11 +1682,17 @@ static bRC setup_restore(bpContext *ctx, void *value) return bRC_Error; } - if (connect_to_gluster(ctx, false) != bRC_OK) { - return bRC_Error; + /* + * If we are already having a handle to gfapi and we are getting the + * same plugin definition there is no need to tear down the whole stuff and + * setup exactly the same. + */ + if (p_ctx->glfs && + bstrcmp((char *)value, p_ctx->plugin_definition)) { + return bRC_OK; } - return bRC_OK; + return connect_to_gluster(ctx, false); } /* From ec32843f9158006f334cb162c32b1be249d8c72b Mon Sep 17 00:00:00 2001 From: Marco van Wieringen Date: Wed, 28 Jun 2017 17:41:46 +0200 Subject: [PATCH 05/46] cephfs: Don't reinitialize the connection to CEPH. The plugin event bEventRestoreCommand is fired for each file we are supposed to restore, currently we don't check if we already have setup the connection to CEPH using the function connect_to_cephfs() and as such we leak a set of socket connections for each file restored. This is bad so lets properly check if things are already setup correctly. For a backup we do the same e.g. check if p_ctx->cmount is not already initialized. --- src/plugins/filed/cephfs-fd.c | 51 ++++++++++++++++++++++++++++++++--- 1 file changed, 48 insertions(+), 3 deletions(-) diff --git a/src/plugins/filed/cephfs-fd.c b/src/plugins/filed/cephfs-fd.c index 627311af1bb..364c47511f0 100644 --- a/src/plugins/filed/cephfs-fd.c +++ b/src/plugins/filed/cephfs-fd.c @@ -122,6 +122,7 @@ struct plugin_ctx { int32_t backup_level; /* Backup level e.g. Full/Differential/Incremental */ utime_t since; /* Since time for Differential/Incremental */ char *plugin_options; /* Options passed to plugin */ + char *plugin_definition; /* Previous plugin definition passed to plugin */ char *conffile; /* Configfile to read to be able to connect to CEPHFS */ char *basedir; /* Basedir to start backup in */ char flags[FOPTS_BYTES]; /* Bareos internal flags */ @@ -307,6 +308,10 @@ static bRC freePlugin(bpContext *ctx) free(p_ctx->conffile); } + if (p_ctx->plugin_definition) { + free(p_ctx->plugin_definition); + } + if (p_ctx->plugin_options) { free(p_ctx->plugin_options); } @@ -825,6 +830,22 @@ static bRC parse_plugin_definition(bpContext *ctx, void *value) return bRC_Error; } + /* + * See if we already got some plugin definition before and its exactly the same. + */ + if (p_ctx->plugin_definition) { + if (bstrcmp(p_ctx->plugin_definition, (char *)value)) { + return bRC_OK; + } + + free(p_ctx->plugin_definition); + } + + /* + * Keep track of the last processed plugin definition. + */ + p_ctx->plugin_definition = bstrdup((char *)value); + keep_existing = (p_ctx->plugin_options) ? true : false; /* @@ -940,6 +961,14 @@ static bRC connect_to_cephfs(bpContext *ctx) int status; plugin_ctx *p_ctx = (plugin_ctx *)ctx->pContext; + /* + * If we get called and we already have a handle to cephfs we should tear it down. + */ + if (p_ctx->cmount) { + ceph_shutdown(p_ctx->cmount); + p_ctx->cmount = NULL; + } + status = ceph_create(&p_ctx->cmount, NULL); if (status < 0) { berrno be; @@ -982,6 +1011,16 @@ static bRC setup_backup(bpContext *ctx, void *value) return bRC_Error; } + /* + * If we are already having a handle to cepfs and we are getting the + * same plugin definition there is no need to tear down the whole stuff and + * setup exactly the same. 
+ */ + if (p_ctx->cmount && + bstrcmp((char *)value, p_ctx->plugin_definition)) { + return bRC_OK; + } + if (connect_to_cephfs(ctx) != bRC_OK) { return bRC_Error; } @@ -1012,11 +1051,17 @@ static bRC setup_restore(bpContext *ctx, void *value) return bRC_Error; } - if (connect_to_cephfs(ctx) != bRC_OK) { - return bRC_Error; + /* + * If we are already having a handle to cepfs and we are getting the + * same plugin definition there is no need to tear down the whole stuff and + * setup exactly the same. + */ + if (p_ctx->cmount && + bstrcmp((char *)value, p_ctx->plugin_definition)) { + return bRC_OK; } - return bRC_OK; + return connect_to_cephfs(ctx); } /* From 56fe5d9c4ffd7ffb5872a37ff093b1ae9a0a06e3 Mon Sep 17 00:00:00 2001 From: Marco van Wieringen Date: Wed, 28 Jun 2017 17:41:55 +0200 Subject: [PATCH 06/46] build: Update copyright. Claim copyright for all changes done over the years. Copyright statements are already in most source files but not in the generic version header and the command output. So essentially this doesn't change anything already not in effect already just makes it more prominent. Also added the BAREOS copyright which was missing in the comment section of version.h --- src/include/version.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/include/version.h b/src/include/version.h index 86feeaddfd9..4647e1aa02f 100644 --- a/src/include/version.h +++ b/src/include/version.h @@ -4,6 +4,7 @@ #define LSMDATE "09Oct17" #define PROG_COPYRIGHT "Copyright (C) %d-2012 Free Software Foundation Europe e.V.\n" \ + "Copyright (C) 2010-2017 Planets Communications B.V.\n" \ "Copyright (C) 2013-2017 Bareos GmbH & Co. KG\n" #define BYEAR "2017" /* year for copyright messages in programs */ @@ -11,6 +12,8 @@ BAREOS® - Backup Archiving REcovery Open Sourced Copyright (C) 2000-2013 Free Software Foundation Europe e.V. + Copyright (C) 2010-2017 Planets Communications B.V. + Copyright (C) 2013-2017 Bareos GmbH & Co. KG This program is Free Software; you can redistribute it and/or modify it under the terms of version three of the GNU Affero General Public From 2ef36e5f53aceffe6faeee964aa2583aefd2d7a6 Mon Sep 17 00:00:00 2001 From: Marco van Wieringen Date: Sat, 1 Jul 2017 23:22:45 +0200 Subject: [PATCH 07/46] stored: Use SEEK_CUR not SEEK_END to check positioning. When we want to determine if the position is set right in DCR::is_eod_valid() we need to perform a lseek(fd, 0, SEEK_CUR) and not a lseek(fd, 0, SEEK_END) as that is already done in DEVICE::eod(). We only want to determine if the lseek was done right in DCR::is_eod_valid() and by doing a SEEK_END we do the same seek twice which leads to the same result but is not correct. --- src/stored/mount.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/stored/mount.c b/src/stored/mount.c index 06a6b27ecfb..7af3326c233 100644 --- a/src/stored/mount.c +++ b/src/stored/mount.c @@ -690,7 +690,7 @@ bool DCR::is_eod_valid() char ed1[50], ed2[50]; boffset_t pos; - pos = dev->lseek(dcr, (boffset_t)0, SEEK_END); + pos = dev->lseek(dcr, (boffset_t)0, SEEK_CUR); if (dev->VolCatInfo.VolCatBytes == (uint64_t)pos) { Jmsg(jcr, M_INFO, 0, _("Ready to append to end of Volume \"%s\"" " size=%s\n"), From 600fe779db328a2143b7322264d1c6649d04cc54 Mon Sep 17 00:00:00 2001 From: Marco van Wieringen Date: Tue, 3 Oct 2017 15:07:13 +0200 Subject: [PATCH 08/46] gfapi-fd: Fix parsing of missing basedir argument. 
---
 src/plugins/filed/gfapi-fd.c | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/src/plugins/filed/gfapi-fd.c b/src/plugins/filed/gfapi-fd.c
index 13c53816a58..2d75850f1a7 100644
--- a/src/plugins/filed/gfapi-fd.c
+++ b/src/plugins/filed/gfapi-fd.c
@@ -159,6 +159,7 @@ enum plugin_argument_type {
    argument_none,
    argument_volume_spec,
    argument_snapdir,
+   argument_basedir,
    argument_gf_file_list
 };
 
@@ -170,6 +171,7 @@ struct plugin_argument {
 static plugin_argument plugin_arguments[] = {
    { "volume", argument_volume_spec },
    { "snapdir", argument_snapdir },
+   { "basedir", argument_basedir },
    { "gffilelist", argument_gf_file_list },
    { NULL, argument_none }
 };
 
@@ -1151,6 +1153,9 @@ static bRC parse_plugin_definition(bpContext *ctx, void *value)
          case argument_snapdir:
             str_destination = &p_ctx->snapdir;
             break;
+         case argument_basedir:
+            str_destination = &p_ctx->basedir;
+            break;
          case argument_gf_file_list:
             str_destination = &p_ctx->gf_file_list;
             break;

From 2132a59d44e1e95ee6f292f508ab3fdbdcdf05a4 Mon Sep 17 00:00:00 2001
From: Marco van Wieringen
Date: Wed, 28 Jun 2017 17:41:14 +0200
Subject: [PATCH 09/46] debug: Print open flags as %08o and mode as %04o

In debug messages, printing the mode as a 4-digit octal value gives much
more readable output, as you get something like 0640, which is also a
value you can pass straight to, say, chmod. Open flags like O_CREAT,
O_RDONLY etc. are also defined as octal values, so print them as 8-digit
octal too; that way you can easily grep for them in the proper include
files and they look familiar. Before, those were sometimes printed as
decimal and sometimes as hexadecimal, which always meant converting
first.
---
 src/filed/accurate.c          | 5 +++--
 src/filed/fd_plugins.c        | 2 +-
 src/findlib/bfile.c           | 4 ++--
 src/findlib/create_file.c     | 6 +++---
 src/plugins/filed/python-fd.c | 8 ++++----
 src/stored/dev.c              | 8 ++++----
 6 files changed, 17 insertions(+), 16 deletions(-)

diff --git a/src/filed/accurate.c b/src/filed/accurate.c
index e14e0cec631..4ce6b4fced5 100644
--- a/src/filed/accurate.c
+++ b/src/filed/accurate.c
@@ -224,8 +224,9 @@ bool accurate_check_file(JCR *jcr, FF_PKT *ff_pkt)
        * Backup only the attribute stream
        */
       if (statc.st_mode != ff_pkt->statp.st_mode) {
-         Dmsg3(dbglvl-1, "%s st_mode differ. Cat: %x File: %x\n",
-               fname, (uint32_t)statc.st_mode, (uint32_t)ff_pkt->statp.st_mode);
+         Dmsg3(dbglvl-1, "%s st_mode differ. 
Cat: %04o File: %04o\n", + fname, (uint32_t)(statc.st_mode & ~S_IFMT), + (uint32_t)(ff_pkt->statp.st_mode & ~S_IFMT)); status = true; } break; diff --git a/src/filed/fd_plugins.c b/src/filed/fd_plugins.c index caf5dc6b2ba..efd3879de65 100644 --- a/src/filed/fd_plugins.c +++ b/src/filed/fd_plugins.c @@ -1848,7 +1848,7 @@ static int my_plugin_bopen(BFILE *bfd, const char *fname, int flags, mode_t mode struct io_pkt io; JCR *jcr = bfd->jcr; - Dmsg1(dbglvl, "plugin_bopen flags=%x\n", flags); + Dmsg1(dbglvl, "plugin_bopen flags=%08o\n", flags); if (!jcr->plugin_ctx) { return 0; } diff --git a/src/findlib/bfile.c b/src/findlib/bfile.c index 9a4615c5cca..9788950c252 100644 --- a/src/findlib/bfile.c +++ b/src/findlib/bfile.c @@ -758,7 +758,7 @@ static inline int bopen_nonencrypted(BFILE *bfd, const char *fname, int flags, m int bopen(BFILE *bfd, const char *fname, int flags, mode_t mode, dev_t rdev) { - Dmsg4(100, "bopen: fname %s, flags %d, mode %d, rdev %u\n", fname, flags, mode, rdev); + Dmsg4(100, "bopen: fname %s, flags %08o, mode %04o, rdev %u\n", fname, flags, (mode & ~S_IFMT), rdev); /* * If the FILE_ATTRIBUTES_DEDUPED_ITEM bit is set this is a deduped file @@ -1096,7 +1096,7 @@ bool is_restore_stream_supported(int stream) int bopen(BFILE *bfd, const char *fname, int flags, mode_t mode, dev_t rdev) { - Dmsg4(100, "bopen: fname %s, flags %d, mode %d, rdev %u\n", fname, flags, mode, rdev); + Dmsg4(100, "bopen: fname %s, flags %08o, mode %04o, rdev %u\n", fname, flags, (mode & ~S_IFMT), rdev); if (bfd->cmd_plugin && plugin_bopen) { Dmsg1(400, "call plugin_bopen fname=%s\n", fname); diff --git a/src/findlib/create_file.c b/src/findlib/create_file.c index 0fc01340753..e70f6eedbaa 100644 --- a/src/findlib/create_file.c +++ b/src/findlib/create_file.c @@ -81,7 +81,7 @@ int create_file(JCR *jcr, ATTR *attr, BFILE *bfd, int replace) } new_mode = attr->statp.st_mode; - Dmsg3(200, "type=%d newmode=%x file=%s\n", attr->type, new_mode, attr->ofname); + Dmsg3(200, "type=%d newmode=%04o file=%s\n", attr->type, (new_mode & ~S_IFMT), attr->ofname); parent_mode = S_IWUSR | S_IXUSR | new_mode; gid = attr->statp.st_gid; uid = attr->statp.st_uid; @@ -298,7 +298,7 @@ int create_file(JCR *jcr, ATTR *attr, BFILE *bfd, int replace) if (is_bopen(bfd)) { Qmsg1(jcr, M_ERROR, 0, _("bpkt already open fid=%d\n"), bfd->fid); } - Dmsg2(400, "open %s flags=0x%x\n", attr->ofname, flags); + Dmsg2(400, "open %s flags=%08o\n", attr->ofname, flags); if ((bopen(bfd, attr->ofname, flags, 0, 0)) < 0) { berrno be; be.set_errno(bfd->berrno); @@ -414,7 +414,7 @@ int create_file(JCR *jcr, ATTR *attr, BFILE *bfd, int replace) */ case FT_DIRBEGIN: case FT_DIREND: - Dmsg2(200, "Make dir mode=%o dir=%s\n", new_mode, attr->ofname); + Dmsg2(200, "Make dir mode=%04o dir=%s\n", (new_mode & ~S_IFMT), attr->ofname); if (!makepath(attr, attr->ofname, new_mode, parent_mode, uid, gid, 0)) { return CF_ERROR; } diff --git a/src/plugins/filed/python-fd.c b/src/plugins/filed/python-fd.c index 962863ca008..7273343e3c0 100644 --- a/src/plugins/filed/python-fd.c +++ b/src/plugins/filed/python-fd.c @@ -3330,10 +3330,10 @@ static PyObject *PyStatPacket_repr(PyStatPacket *self) PyObject *s; POOL_MEM buf(PM_MESSAGE); - Mmsg(buf, "StatPacket(dev=%ld, ino=%lld, mode=%d, nlink=%d, " + Mmsg(buf, "StatPacket(dev=%ld, ino=%lld, mode=%04o, nlink=%d, " "uid=%ld, gid=%ld, rdev=%ld, size=%lld, " "atime=%ld, mtime=%ld, ctime=%ld, blksize=%ld, blocks=%lld)", - self->dev, self->ino, self->mode, self->nlink, + self->dev, self->ino, (self->mode & ~S_IFMT), self->nlink, 
self->uid, self->gid, self->rdev, self->size, self->atime, self->mtime, self->ctime, self->blksize, self->blocks); @@ -3658,10 +3658,10 @@ static PyObject *PyIoPacket_repr(PyIoPacket *self) PyObject *s; POOL_MEM buf(PM_MESSAGE); - Mmsg(buf, "IoPacket(func=%d, count=%ld, flags=%ld, mode=%ld, " + Mmsg(buf, "IoPacket(func=%d, count=%ld, flags=%ld, mode=%04o, " "buf=\"%s\", fname=\"%s\", status=%ld, io_errno=%ld, lerror=%ld, " "whence=%ld, offset=%lld, win32=%d)", - self->func, self->count, self->flags, self->mode, + self->func, self->count, self->flags, (self->mode & ~S_IFMT), PyGetByteArrayValue(self->buf), self->fname, self->status, self->io_errno, self->lerror, self->whence, self->offset, self->win32); diff --git a/src/stored/dev.c b/src/stored/dev.c index a282b4a5597..539eb67d233 100644 --- a/src/stored/dev.c +++ b/src/stored/dev.c @@ -161,8 +161,8 @@ static inline DEVICE *m_init_dev(JCR *jcr, DEVRES *device, bool new_init) device->dev_type = B_FIFO_DEV; } else if (!bit_is_set(CAP_REQMOUNT, device->cap_bits)) { Jmsg2(jcr, M_ERROR, 0, - _("%s is an unknown device type. Must be tape or directory, st_mode=%x\n"), - device->device_name, statp.st_mode); + _("%s is an unknown device type. Must be tape or directory, st_mode=%04o\n"), + device->device_name, (statp.st_mode & ~S_IFMT)); return NULL; } } @@ -586,7 +586,7 @@ bool DEVICE::open(DCR *dcr, int omode) */ clone_bits(ST_MAX, preserve, state); - Dmsg2(100, "preserve=0x%x fd=%d\n", preserve, m_fd); + Dmsg2(100, "preserve=%08o fd=%d\n", preserve, m_fd); return m_fd >= 0; } @@ -651,7 +651,7 @@ void DEVICE::open_device(DCR *dcr, int omode) /* * If creating file, give 0640 permissions */ - Dmsg3(100, "open disk: mode=%s open(%s, 0x%x, 0640)\n", mode_to_str(omode), + Dmsg3(100, "open disk: mode=%s open(%s, %08o, 0640)\n", mode_to_str(omode), archive_name.c_str(), oflags); if ((m_fd = d_open(archive_name.c_str(), oflags, 0640)) < 0) { From 9c13a42833947b7926c68ebd84b641e50d26e6f2 Mon Sep 17 00:00:00 2001 From: Marco van Wieringen Date: Sat, 1 Jul 2017 23:22:45 +0200 Subject: [PATCH 10/46] lib: Add edit_pthread() function to print pthread_t ids. There is no real clean way to print the pthread_t type returned by various pthreads functions. Until now things were printed as pointer addresses. The edit_pthread() method is analog to other edit functions and tries to print things in an uniform way and should be used to print the content of the pthread_t opaque structure. The benefit of this is that the representation is in one central place and we can implement specific representations for different platforms. As part of the implementation of this new function the edit.c source file was also refactored and reindented to be better readable. Further more now all modifiers are stored as static const char pointers which used to be done for some functions but also not in others. 
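
As a rough illustration of the idea behind edit_pthread() (this is not the
Bareos implementation added below; format_pthread() and its buffer handling
are made up for the example), an opaque pthread_t can be printed uniformly by
dumping its bytes as hex, which works whether the platform defines it as an
integer, a pointer or a struct:

#include <pthread.h>
#include <stdio.h>

/* Format a pthread_t as "0x" followed by its object bytes in hex.
 * buf must hold at least 2 + 2 * sizeof(pthread_t) + 1 bytes. */
static char *format_pthread(pthread_t tid, char *buf, size_t buf_len)
{
   const unsigned char *bytes = (const unsigned char *)&tid;
   size_t i, used;

   used = snprintf(buf, buf_len, "0x");
   for (i = 0; i < sizeof(tid) && used + 2 < buf_len; i++) {
      used += snprintf(buf + used, buf_len - used, "%02x", bytes[i]);
   }
   return buf;
}

int main(void)
{
   char buf[2 * sizeof(pthread_t) + 3];

   printf("current thread id: %s\n",
          format_pthread(pthread_self(), buf, sizeof(buf)));
   return 0;
}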
--- src/lib/btimers.c | 33 ++++-- src/lib/edit.c | 261 ++++++++++++++++++++++++++++++++-------------- src/lib/jcr.c | 23 ++-- src/lib/lockmgr.c | 16 +-- src/lib/protos.h | 1 + src/lib/res.c | 15 ++- src/stored/lock.c | 15 ++- 7 files changed, 242 insertions(+), 122 deletions(-) diff --git a/src/lib/btimers.c b/src/lib/btimers.c index e6965c16f87..d69314c8b35 100644 --- a/src/lib/btimers.c +++ b/src/lib/btimers.c @@ -130,22 +130,25 @@ static void callback_child_timer(watchdog_t *self) */ btimer_t *start_thread_timer(JCR *jcr, pthread_t tid, uint32_t wait) { + char ed1[50]; btimer_t *wid; + wid = btimer_start_common(wait); if (wid == NULL) { Dmsg1(dbglvl, "start_thread_timer return NULL from common. wait=%d.\n", wait); return NULL; } + wid->type = TYPE_PTHREAD; wid->tid = tid; wid->jcr = jcr; - wid->wd->callback = callback_thread_timer; wid->wd->one_shot = true; wid->wd->interval = wait; register_watchdog(wid->wd); - Dmsg3(dbglvl, "Start thread timer %p tid %p for %d secs.\n", wid, tid, wait); + Dmsg3(dbglvl, "Start thread timer %p tid %s for %d secs.\n", + wid, edit_pthread(tid, ed1, sizeof(ed1)), wait); return wid; } @@ -158,14 +161,18 @@ btimer_t *start_thread_timer(JCR *jcr, pthread_t tid, uint32_t wait) */ btimer_t *start_bsock_timer(BSOCK *bsock, uint32_t wait) { + char ed1[50]; btimer_t *wid; + if (wait <= 0) { /* wait should be > 0 */ return NULL; } + wid = btimer_start_common(wait); if (wid == NULL) { return NULL; } + wid->type = TYPE_BSOCK; wid->tid = pthread_self(); wid->bsock = bsock; @@ -176,8 +183,8 @@ btimer_t *start_bsock_timer(BSOCK *bsock, uint32_t wait) wid->wd->interval = wait; register_watchdog(wid->wd); - Dmsg4(dbglvl, "Start bsock timer %p tid=%p for %d secs at %d\n", wid, - wid->tid, wait, time(NULL)); + Dmsg4(dbglvl, "Start bsock timer %p tid=%s for %d secs at %d\n", + wid, edit_pthread(wid->tid, ed1, sizeof(ed1)), wait, time(NULL)); return wid; } @@ -187,11 +194,15 @@ btimer_t *start_bsock_timer(BSOCK *bsock, uint32_t wait) */ void stop_bsock_timer(btimer_t *wid) { + char ed1[50]; + if (wid == NULL) { Dmsg0(900, "stop_bsock_timer called with NULL btimer_id\n"); return; } - Dmsg3(dbglvl, "Stop bsock timer %p tid=%p at %d.\n", wid, wid->tid, time(NULL)); + + Dmsg3(dbglvl, "Stop bsock timer %p tid=%s at %d.\n", + wid, edit_pthread(wid->tid, ed1, sizeof(ed1)), time(NULL)); stop_btimer(wid); } @@ -201,11 +212,15 @@ void stop_bsock_timer(btimer_t *wid) */ void stop_thread_timer(btimer_t *wid) { + char ed1[50]; + if (wid == NULL) { Dmsg0(dbglvl, "stop_thread_timer called with NULL btimer_id\n"); return; } - Dmsg2(dbglvl, "Stop thread timer %p tid=%p.\n", wid, wid->tid); + + Dmsg2(dbglvl, "Stop thread timer %p tid=%s.\n", + wid, edit_pthread(wid->tid, ed1, sizeof(ed1))); stop_btimer(wid); } @@ -220,12 +235,14 @@ static void destructor_thread_timer(watchdog_t *self) static void callback_thread_timer(watchdog_t *self) { + char ed1[50]; btimer_t *wid = (btimer_t *)self->data; Dmsg4(dbglvl, "thread timer %p kill %s tid=%p at %d.\n", self, - wid->type == TYPE_BSOCK ? "bsock" : "thread", wid->tid, time(NULL)); + wid->type == TYPE_BSOCK ? 
"bsock" : "thread", + edit_pthread(wid->tid, ed1, sizeof(ed1)), time(NULL)); if (wid->jcr) { - Dmsg2(dbglvl, "killed jid=%u Job=%s\n", wid->jcr->JobId, wid->jcr->Job); + Dmsg2(dbglvl, "killed JobId=%u Job=%s\n", wid->jcr->JobId, wid->jcr->Job); } if (wid->type == TYPE_BSOCK && wid->bsock) { diff --git a/src/lib/edit.c b/src/lib/edit.c index 3e6667d1cc3..71bcca03d91 100644 --- a/src/lib/edit.c +++ b/src/lib/edit.c @@ -29,7 +29,11 @@ #include "bareos.h" #include -/* We assume ASCII input and don't worry about overflow */ +#define DEFAULT_FORMAT_LENGTH 27 + +/* + * We assume ASCII input and don't worry about overflow + */ uint64_t str_to_uint64(const char *str) { const char *p = str; @@ -38,16 +42,20 @@ uint64_t str_to_uint64(const char *str) if (!p) { return 0; } + while (B_ISSPACE(*p)) { p++; } + if (*p == '+') { p++; } + while (B_ISDIGIT(*p)) { value = B_TIMES10(value) + *p - '0'; p++; } + return value; } @@ -60,50 +68,62 @@ int64_t str_to_int64(const char *str) if (!p) { return 0; } + while (B_ISSPACE(*p)) { p++; } + if (*p == '+') { p++; } else if (*p == '-') { negative = true; p++; } + value = str_to_uint64(p); if (negative) { value = -value; } + return value; } /* - * Edit an integer number with commas, the supplied buffer - * must be at least 27 bytes long. The incoming number - * is always widened to 64 bits. + * Edit an integer number with commas, the supplied buffer must be at least + * DEFAULT_FORMAT_LENGTH bytes long. The incoming number is always widened to 64 bits. */ char *edit_uint64_with_commas(uint64_t val, char *buf) { edit_uint64(val, buf); + return add_commas(buf, buf); } /* - * Edit an integer into "human-readable" format with four or fewer - * significant digits followed by a suffix that indicates the scale - * factor. The buf array inherits a 27 byte minimim length - * requirement from edit_unit64_with_commas(), although the output - * string is limited to eight characters. + * Edit an integer into "human-readable" format with four or fewer significant digits + * followed by a suffix that indicates the scale factor. The buf array inherits a + * DEFAULT_FORMAT_LENGTH byte minimum length requirement from edit_unit64_with_commas(), + * although the output string is limited to eight characters. */ char *edit_uint64_with_suffix(uint64_t val, char *buf) { int commas = 0; char *c, mbuf[50]; - const char *suffix[] = - { "", "K", "M", "G", "T", "P", "E", "Z", "Y", "FIX ME" }; + static const char *suffix[] = { + "", + "K", + "M", + "G", + "T", + "P", + "E", + "Z", + "Y", + "FIX ME" + }; int suffixes = sizeof(suffix) / sizeof(*suffix); edit_uint64_with_commas(val, mbuf); - if ((c = strchr(mbuf, ',')) != NULL) { commas++; *c++ = '.'; @@ -111,49 +131,53 @@ char *edit_uint64_with_suffix(uint64_t val, char *buf) commas++; *c++ = '\0'; } - mbuf[5] = '\0'; // drop this to get '123.456 TB' rather than '123.4 TB' + mbuf[5] = '\0'; /* Drop this to get '123.456 TB' rather than '123.4 TB' */ + } + + if (commas >= suffixes) { + commas = suffixes - 1; } + bsnprintf(buf, DEFAULT_FORMAT_LENGTH, "%s %s", mbuf, suffix[commas]); - if (commas >= suffixes) - commas = suffixes - 1; - bsnprintf(buf, 27, "%s %s", mbuf, suffix[commas]); return buf; } /* - * Edit an integer number, the supplied buffer - * must be at least 27 bytes long. The incoming number - * is always widened to 64 bits. + * Edit an integer number, the supplied buffer must be at least DEFAULT_FORMAT_LENGTH bytes long. + * The incoming number is always widened to 64 bits. 
+ * Replacement for sprintf(buf, "%" llu, val) */ char *edit_uint64(uint64_t val, char *buf) { - /* - * Replacement for sprintf(buf, "%" llu, val) - */ char mbuf[50]; - mbuf[sizeof(mbuf)-1] = 0; - int i = sizeof(mbuf)-2; /* edit backward */ + mbuf[sizeof(mbuf) - 1] = 0; + int i = sizeof(mbuf) - 2; /* Edit backward */ + if (val == 0) { mbuf[i--] = '0'; } else { while (val != 0) { - mbuf[i--] = "0123456789"[val%10]; + mbuf[i--] = "0123456789"[val % 10]; val /= 10; } } - bstrncpy(buf, &mbuf[i+1], 27); + bstrncpy(buf, &mbuf[i + 1], DEFAULT_FORMAT_LENGTH); + return buf; } +/* + * Edit an integer number, the supplied buffer must be at least DEFAULT_FORMAT_LENGTH bytes long. + * The incoming number is always widened to 64 bits. + * Replacement for sprintf(buf, "%" llu, val) + */ char *edit_int64(int64_t val, char *buf) { - /* - * Replacement for sprintf(buf, "%" llu, val) - */ char mbuf[50]; bool negative = false; - mbuf[sizeof(mbuf)-1] = 0; - int i = sizeof(mbuf)-2; /* edit backward */ + mbuf[sizeof(mbuf) - 1] = 0; + int i = sizeof(mbuf) - 2; /* Edit backward */ + if (val == 0) { mbuf[i--] = '0'; } else { @@ -162,31 +186,31 @@ char *edit_int64(int64_t val, char *buf) val = -val; } while (val != 0) { - mbuf[i--] = "0123456789"[val%10]; + mbuf[i--] = "0123456789"[val % 10]; val /= 10; } } if (negative) { mbuf[i--] = '-'; } - bstrncpy(buf, &mbuf[i+1], 27); + bstrncpy(buf, &mbuf[i + 1], DEFAULT_FORMAT_LENGTH); + return buf; } /* - * Edit an integer number with commas, the supplied buffer - * must be at least 27 bytes long. The incoming number - * is always widened to 64 bits. + * Edit an integer number with commas, the supplied buffer must be at least DEFAULT_FORMAT_LENGTH + * bytes long. The incoming number is always widened to 64 bits. */ char *edit_int64_with_commas(int64_t val, char *buf) { edit_int64(val, buf); + return add_commas(buf, buf); } /* - * Given a string "str", separate the numeric part into - * str, and the modifier into mod. + * Given a string "str", separate the numeric part into str, and the modifier into mod. 
*/ static bool get_modifier(char *str, char *num, int num_len, char *mod, int mod_len) { @@ -195,19 +219,22 @@ static bool get_modifier(char *str, char *num, int num_len, char *mod, int mod_l strip_trailing_junk(str); len = strlen(str); - for (i=0; i (num_end - num_begin + 1)) { num_len = num_end - num_begin + 1; @@ -215,29 +242,37 @@ static bool get_modifier(char *str, char *num, int num_len, char *mod, int mod_l if (num_len == 0) { return false; } - /* Eat any spaces in front of modifier */ - for ( ; i (mod_end - mod_begin + 1)) { mod_len = mod_end - mod_begin + 1; } + Dmsg5(900, "str=%s: num_beg=%d num_end=%d mod_beg=%d mod_end=%d\n", - str, num_begin, num_end, mod_begin, mod_end); + str, num_begin, num_end, mod_begin, mod_end); bstrncpy(num, &str[num_begin], num_len); bstrncpy(mod, &str[mod_begin], mod_len); + if (!is_a_number(num)) { return false; } + bstrncpy(str, &str[mod_end], len); Dmsg2(900, "num=%s mod=%s\n", num, mod); @@ -247,7 +282,7 @@ static bool get_modifier(char *str, char *num, int num_len, char *mod, int mod_l /* * Convert a string duration to utime_t (64 bit seconds) * Returns false: if error - true: if OK, and value stored in value + * true: if OK, and value stored in value */ bool duration_to_utime(char *str, utime_t *value) { @@ -289,12 +324,15 @@ bool duration_to_utime(char *str, utime_t *value) if (!get_modifier(str, num_str, sizeof(num_str), mod_str, sizeof(mod_str))) { return false; } - /* Now find the multiplier corresponding to the modifier */ + + /* + * Now find the multiplier corresponding to the modifier + */ mod_len = strlen(mod_str); if (mod_len == 0) { - i = 1; /* default to seconds */ + i = 1; /* Default to seconds */ } else { - for (i=0; mod[i]; i++) { + for (i = 0; mod[i]; i++) { if (bstrncasecmp(mod_str, mod[i], mod_len)) { break; } @@ -303,15 +341,19 @@ bool duration_to_utime(char *str, utime_t *value) return false; } } + Dmsg2(900, "str=%s: mult=%d\n", num_str, mult[i]); errno = 0; val = strtod(num_str, NULL); + if (errno != 0 || val < 0) { return false; } + total += val * mult[i]; } *value = (utime_t)total; + return true; } @@ -321,13 +363,25 @@ bool duration_to_utime(char *str, utime_t *value) char *edit_utime(utime_t val, char *buf, int buf_len) { char mybuf[200]; - static const int32_t mult[] = {60*60*24*365, 60*60*24*30, 60*60*24, 60*60, 60}; - static const char *mod[] = {"year", "month", "day", "hour", "min"}; + static const int32_t mult[] = { + 60 * 60 * 24 * 365, + 60 *60 * 24 *30, + 60 *60 * 24, + 60 * 60, + 60 + }; + static const char *mod[] = { + "year", + "month", + "day", + "hour", + "min" + }; int i; uint32_t times; *buf = 0; - for (i=0; i<5; i++) { + for (i = 0; i < 5; i++) { times = (uint32_t)(val / mult[i]); if (times > 0) { val = val - (utime_t)times * mult[i]; @@ -335,12 +389,29 @@ char *edit_utime(utime_t val, char *buf, int buf_len) bstrncat(buf, mybuf, buf_len); } } + if (val == 0 && strlen(buf) == 0) { bstrncat(buf, "0 secs", buf_len); } else if (val != 0) { bsnprintf(mybuf, sizeof(mybuf), "%d sec%s", (uint32_t)val, val>1?"s":""); bstrncat(buf, mybuf, buf_len); } + + return buf; +} + +char *edit_pthread(pthread_t val, char *buf, int buf_len) +{ + int i; + char mybuf[3]; + unsigned char *ptc = (unsigned char *)(void *)(&val); + + bstrncpy(buf, "0x", buf_len); + for (i = sizeof(val); i; --i) { + bsnprintf(mybuf, sizeof(mybuf), "%02x", (unsigned)(ptc[i])); + bstrncat(buf, mybuf, buf_len); + } + return buf; } @@ -350,23 +421,28 @@ static bool strunit_to_uint64(char *str, uint64_t *value, const char **mod) double val; char 
mod_str[20]; char num_str[50]; - const int64_t mult[] = {1, /* byte */ - 1024, /* kilobyte */ - 1000, /* kb kilobyte */ - 1048576, /* megabyte */ - 1000000, /* mb megabyte */ - 1073741824, /* gigabyte */ - 1000000000}; /* gb gigabyte */ + static const int64_t mult[] = { + 1, /* Byte */ + 1024, /* kiloByte */ + 1000, /* KiB KiloByte */ + 1048576, /* MegaByte */ + 1000000, /* MiB MegaByte */ + 1073741824, /* GigaByte */ + 1000000000 /* GiB GigaByte */ + }; if (!get_modifier(str, num_str, sizeof(num_str), mod_str, sizeof(mod_str))) { return 0; } - /* Now find the multiplier corresponding to the modifier */ + + /* + * Now find the multiplier corresponding to the modifier + */ mod_len = strlen(mod_str); if (mod_len == 0) { - i = 0; /* default with no modifier = 1 */ + i = 0; /* Default with no modifier = 1 */ } else { - for (i=0; mod[i]; i++) { + for (i = 0; mod[i]; i++) { if (bstrncasecmp(mod_str, mod[i], mod_len)) { break; } @@ -375,43 +451,68 @@ static bool strunit_to_uint64(char *str, uint64_t *value, const char **mod) return false; } } + Dmsg2(900, "str=%s: mult=%d\n", str, mult[i]); errno = 0; val = strtod(num_str, NULL); + if (errno != 0 || val < 0) { return false; } *value = (utime_t)(val * mult[i]); + return true; } /* * Convert a size in bytes to uint64_t * Returns false: if error - true: if OK, and value stored in value + * true: if OK, and value stored in value */ bool size_to_uint64(char *str, uint64_t *value) { - /* first item * not used */ - static const char *mod[] = {"*", "k", "kb", "m", "mb", "g", "gb", NULL}; + /* + * First item * not used + */ + static const char *mod[] = { + "*", + "k", + "kb", + "m", + "mb", + "g", + "gb", + NULL + }; + return strunit_to_uint64(str, value, mod); } /* * Convert a speed in bytes/s to uint64_t * Returns false: if error - true: if OK, and value stored in value + * true: if OK, and value stored in value */ bool speed_to_uint64(char *str, uint64_t *value) { - /* first item * not used */ - static const char *mod[] = {"*", "k/s", "kb/s", "m/s", "mb/s", NULL}; + /* + * First item * not used + */ + static const char *mod[] = { + "*", + "k/s", + "kb/s", + "m/s", + "mb/s", + NULL + }; + return strunit_to_uint64(str, value, mod); } /* * Check if specified string is a number or not. - * Taken from SQLite, cool, thanks. + * Taken from SQLite, cool, thanks. 
*/ bool is_a_number(const char *n) { @@ -420,19 +521,23 @@ bool is_a_number(const char *n) if( *n == '-' || *n == '+' ) { n++; } + while (B_ISDIGIT(*n)) { digit_seen = true; n++; } + if (digit_seen && *n == '.') { n++; while (B_ISDIGIT(*n)) { n++; } } + if (digit_seen && (*n == 'e' || *n == 'E') && (B_ISDIGIT(n[1]) || ((n[1]=='-' || n[1] == '+') && B_ISDIGIT(n[2])))) { - n += 2; /* skip e- or e+ or e digit */ + n += 2; /* Skip e- or e+ or e digit */ while (B_ISDIGIT(*n)) { n++; } } + return digit_seen && *n==0; } @@ -443,6 +548,7 @@ bool is_a_number_list(const char *n) { bool previous_digit = false; bool digit_seen = false; + while (*n) { if (B_ISDIGIT(*n)) { previous_digit=true; @@ -454,6 +560,7 @@ bool is_a_number_list(const char *n) } n++; } + return digit_seen && *n==0; } @@ -472,8 +579,7 @@ bool is_an_integer(const char *n) /* * Check if BAREOS Resoure Name is valid - */ -/* + * * Check if the Volume name has legal characters * If ua is non-NULL send the message */ @@ -499,7 +605,7 @@ bool is_name_valid(const char *name, POOLMEM *&msg) /* * Restrict the characters permitted in the Volume name */ - for (p=name; *p; p++) { + for (p = name; *p; p++) { if (B_ISALPHA(*p) || B_ISDIGIT(*p) || strchr(accept, (int)(*p))) { continue; } @@ -508,6 +614,7 @@ bool is_name_valid(const char *name, POOLMEM *&msg) } return false; } + len = p - name; if (len >= MAX_NAME_LENGTH) { if (msg) { @@ -515,6 +622,7 @@ bool is_name_valid(const char *name, POOLMEM *&msg) } return false; } + if (len == 0) { if (msg) { Mmsg(msg, _("Volume name must be at least one character long.\n")); @@ -538,8 +646,7 @@ bool is_name_valid(const char *name) } /* - * Add commas to a string, which is presumably - * a number. + * Add commas to a string, which is presumably a number. */ char *add_commas(char *val, char *buf) { @@ -559,11 +666,11 @@ char *add_commas(char *val, char *buf) q = p + nc; *q-- = *p--; for ( ; nc; nc--) { - for (i=0; i < 3; i++) { + for (i = 0; i < 3; i++) { *q-- = *p--; } *q-- = ','; } + return buf; } - diff --git a/src/lib/jcr.c b/src/lib/jcr.c index f8a060abadd..1070fc916f0 100644 --- a/src/lib/jcr.c +++ b/src/lib/jcr.c @@ -1296,7 +1296,7 @@ void dbg_jcr_add_hook(dbg_jcr_hook_t *hook) */ void dbg_print_jcr(FILE *fp) { - char buf1[128], buf2[128], buf3[128], buf4[128]; + char ed1[50], buf1[128], buf2[128], buf3[128], buf4[128]; if (!jcrs) { return; } @@ -1304,23 +1304,12 @@ void dbg_print_jcr(FILE *fp) fprintf(fp, "Attempt to dump current JCRs. 
njcrs=%d\n", jcrs->size()); for (JCR *jcr = (JCR *)jcrs->first(); jcr ; jcr = (JCR *)jcrs->next(jcr)) { -#ifdef HAVE_WIN32 - fprintf(fp, "threadid=%p JobId=%d JobStatus=%c jcr=%p name=%s\n", - (void *)&jcr->my_thread_id, (int)jcr->JobId, - jcr->JobStatus, jcr, jcr->Job); - fprintf(fp, "threadid=%p killable=%d JobId=%d JobStatus=%c " - "jcr=%p name=%s\n", - (void *)&jcr->my_thread_id, jcr->is_killable(), - (int)jcr->JobId, jcr->JobStatus, jcr, jcr->Job); -#else - fprintf(fp, "threadid=%p JobId=%d JobStatus=%c jcr=%p name=%s\n", - (void *)jcr->my_thread_id, (int)jcr->JobId, - jcr->JobStatus, jcr, jcr->Job); - fprintf(fp, "threadid=%p killable=%d JobId=%d JobStatus=%c " - "jcr=%p name=%s\n", - (void *)jcr->my_thread_id, jcr->is_killable(), + fprintf(fp, "threadid=%s JobId=%d JobStatus=%c jcr=%p name=%s\n", + edit_pthread(jcr->my_thread_id, ed1, sizeof(ed1)), (int)jcr->JobId, jcr->JobStatus, jcr, jcr->Job); -#endif + fprintf(fp, "threadid=%s killable=%d JobId=%d JobStatus=%c jcr=%p name=%s\n", + edit_pthread(jcr->my_thread_id, ed1, sizeof(ed1)), + jcr->is_killable(), (int)jcr->JobId, jcr->JobStatus, jcr, jcr->Job); fprintf(fp, "\tuse_count=%i\n", jcr->use_count()); fprintf(fp, "\tJobType=%c JobLevel=%c\n", jcr->getJobType(), jcr->getJobLevel()); diff --git a/src/lib/lockmgr.c b/src/lib/lockmgr.c index b6c14d40961..f5dc15a75b9 100644 --- a/src/lib/lockmgr.c +++ b/src/lib/lockmgr.c @@ -248,8 +248,6 @@ static bool contains_cycle(dlist *g) return false; } -/****************************************************************/ - class lmgr_thread_t: public SMARTALLOC { public: @@ -275,14 +273,18 @@ class lmgr_thread_t: public SMARTALLOC } void _dump(FILE *fp) { - fprintf(fp, "threadid=%p max=%i current=%i\n", - (void *)thread_id, max, current); - for(int i=0; i<=current; i++) { + char ed1[50]; + + fprintf(fp, "threadid=%s max=%i current=%i\n", + edit_pthread(thread_id, ed1, sizeof(ed1)), max, current); + + for (int i = 0; i <= current; i++) { fprintf(fp, " lock=%p state=%s priority=%i %s:%i\n", lock_list[i].lock, - (lock_list[i].state=='W')?"Wanted ":"Granted", + (lock_list[i].state=='W') ? "Wanted " : "Granted", lock_list[i].priority, - lock_list[i].file, lock_list[i].line); + lock_list[i].file, + lock_list[i].line); } } diff --git a/src/lib/protos.h b/src/lib/protos.h index bb3a40e2ec7..a77653e7a69 100644 --- a/src/lib/protos.h +++ b/src/lib/protos.h @@ -233,6 +233,7 @@ bool duration_to_utime(char *str, utime_t *value); bool size_to_uint64(char *str, uint64_t *value); bool speed_to_uint64(char *str, uint64_t *value); char *edit_utime(utime_t val, char *buf, int buf_len); +char *edit_pthread(pthread_t val, char *buf, int buf_len); bool is_a_number(const char *num); bool is_a_number_list(const char *n); bool is_an_integer(const char *n); diff --git a/src/lib/res.c b/src/lib/res.c index b66da376f8d..beee2169a72 100644 --- a/src/lib/res.c +++ b/src/lib/res.c @@ -43,7 +43,7 @@ extern CONFIG *my_config; /* Our Global config */ * Define the Union of all the common resource structure definitions. 
*/ union URES { - MSGSRES res_msgs; + MSGSRES res_msgs; RES hdr; }; @@ -56,18 +56,23 @@ void b_LockRes(const char *file, int line) int errstat; #ifdef TRACE_RES + char ed1[50]; + Pmsg4(000, "LockRes locked=%d w_active=%d at %s:%d\n", res_locked, my_config->m_res_lock.w_active, file, line); - if (res_locked) { - Pmsg2(000, "LockRes writerid=%lu myid=%lu\n", - my_config->m_res_lock.writer_id, pthread_self()); - } + if (res_locked) { + Pmsg2(000, "LockRes writerid=%lu myid=%s\n", + my_config->m_res_lock.writer_id, + edit_pthread(pthread_self(), ed1, sizeof(ed1))); + } #endif + if ((errstat = rwl_writelock(&my_config->m_res_lock)) != 0) { Emsg3(M_ABORT, 0, _("rwl_writelock failure at %s:%d: ERR=%s\n"), file, line, strerror(errstat)); } + res_locked++; } diff --git a/src/stored/lock.c b/src/stored/lock.c index 814cb84812a..44be6394ef0 100644 --- a/src/stored/lock.c +++ b/src/stored/lock.c @@ -389,14 +389,13 @@ void DEVICE::rLock(bool locked) num_waiting++; /* indicate that I am waiting */ while (blocked()) { int status; -#ifndef HAVE_WIN32 - /* - * thread id on Win32 may be a struct - */ - Dmsg3(sd_dbglvl, "rLock blked=%s no_wait=%p me=%p\n", print_blocked(), - no_wait_id, pthread_self()); -#endif - if ((status = pthread_cond_wait(&this->wait, &m_mutex)) != 0) { + char ed1[50], ed2[50]; + + Dmsg3(sd_dbglvl, "rLock blked=%s no_wait=%s me=%s\n", + print_blocked(), + edit_pthread(no_wait_id, ed1, sizeof(ed1)), + edit_pthread(pthread_self(), ed2, sizeof(ed2))); + if ((status = pthread_cond_wait(&wait, &m_mutex)) != 0) { berrno be; this->Unlock(); Emsg1(M_ABORT, 0, _("pthread_cond_wait failure. ERR=%s\n"), From 44bddc87ada059e1de2792f9ab299ca3966e6377 Mon Sep 17 00:00:00 2001 From: Marco van Wieringen Date: Sat, 1 Jul 2017 23:22:45 +0200 Subject: [PATCH 11/46] stored: Add device specific status trigger. This adds code that allows you to trigger a device to return specific device status information analog to the way which was already available to plugins using the bsdEventDriveStatus and bsdEventVolumeStatus events. In essence this is equivalent to the bsdEventDriveStatus which allows a plugin (for example the scsicrypto-sd plugin to return the crypto status of an LTO4+ drive.) but then for any device without the need to have a plugin loaded for the specific device. --- src/stored/dev.h | 13 +++++++- src/stored/sd_plugins.h | 7 ----- src/stored/status.c | 70 +++++++++++++++++++++++++++-------------- 3 files changed, 59 insertions(+), 31 deletions(-) diff --git a/src/stored/dev.h b/src/stored/dev.h index 624d4e5e1dc..d92ef38e20c 100644 --- a/src/stored/dev.h +++ b/src/stored/dev.h @@ -250,10 +250,20 @@ struct BLOCKSIZES { uint32_t min_block_size; }; -class DEVRES; /* Device resource defined in stored_conf.h */ +class DEVRES; /* Forward reference Device resource defined in stored_conf.h */ class DCR; /* Forward reference */ class VOLRES; /* Forward reference */ +/* + * Device specific status information either returned via DEVICE::device_status() + * method of via bsdEventDriveStatus and bsdEventVolumeStatus plugin events. + */ +typedef struct DevStatTrigger { + DEVRES *device; + POOLMEM *status; + int status_length; +} bsdDevStatTrig; + /* * Device structure definition. 
* @@ -504,6 +514,7 @@ class DEVICE: public SMARTALLOC { virtual bool reposition(DCR *dcr, uint32_t rfile, uint32_t rblock); virtual bool mount_backend(DCR *dcr, int timeout) { return true; }; virtual bool unmount_backend(DCR *dcr, int timeout) { return true; }; + virtual bool device_status(bsdDevStatTrig *dst) { return false; }; boffset_t lseek(DCR *dcr, boffset_t offset, int whence) { return d_lseek(dcr, offset, whence); }; bool truncate(DCR *dcr) { return d_truncate(dcr); }; diff --git a/src/stored/sd_plugins.h b/src/stored/sd_plugins.h index 1004c410a7f..41fc2826228 100644 --- a/src/stored/sd_plugins.h +++ b/src/stored/sd_plugins.h @@ -203,13 +203,6 @@ typedef struct s_sdpluginFuncs { #define sdplug_func(plugin) ((psdFuncs *)(plugin->pfuncs)) #define sdplug_info(plugin) ((genpInfo *)(plugin->pinfo)) -class DEVRES; -typedef struct s_sdbareosDevStatTrigger { - DEVRES *device; - POOLMEM *status; - int status_length; -} bsdDevStatTrig; - #ifdef __cplusplus } #endif diff --git a/src/stored/status.c b/src/stored/status.c index cb1df8d6694..0d7d03a9f19 100644 --- a/src/stored/status.c +++ b/src/stored/status.c @@ -52,10 +52,6 @@ static void sendit(const char *msg, int len, STATUS_PKT *sp); static void sendit(POOL_MEM &msg, int len, STATUS_PKT *sp); static void sendit(const char *msg, int len, void *arg); -static void trigger_device_status_hook(JCR *jcr, - DEVRES *device, - STATUS_PKT *sp, - bsdEventType eventType); static void send_blocked_status(DEVICE *dev, STATUS_PKT *sp); static void send_device_status(DEVICE *dev, STATUS_PKT *sp); static void list_terminated_jobs(STATUS_PKT *sp); @@ -214,6 +210,49 @@ static bool need_to_list_device(const char *devicenames, DEVRES *device) return true; } +/* + * Trigger the specific eventtype to get status information from any plugin that + * registered the event to return specific device information. + */ +static void trigger_device_status_hook(JCR *jcr, + DEVRES *device, + STATUS_PKT *sp, + bsdEventType eventType) +{ + bsdDevStatTrig dst; + + dst.device = device; + dst.status = get_pool_memory(PM_MESSAGE); + dst.status_length = 0; + + if (generate_plugin_event(jcr, eventType, &dst) == bRC_OK) { + if (dst.status_length > 0) { + sendit(dst.status, dst.status_length, sp); + } + } + free_pool_memory(dst.status); +} + +/* + * Ask the device if it want to log something specific in the status overview. 
+ */ +static void get_device_specific_status(DEVRES *device, + STATUS_PKT *sp) +{ + bsdDevStatTrig dst; + + dst.device = device; + dst.status = get_pool_memory(PM_MESSAGE); + dst.status_length = 0; + + if (device->dev->device_status(&dst)) { + if (dst.status_length > 0) { + sendit(dst.status, dst.status_length, sp); + } + } + free_pool_memory(dst.status); +} + static void list_devices(JCR *jcr, STATUS_PKT *sp, const char *devicenames) { int len; @@ -300,7 +339,9 @@ static void list_devices(JCR *jcr, STATUS_PKT *sp, const char *devicenames) sendit(msg, len, sp); } + get_device_specific_status(device, sp); trigger_device_status_hook(jcr, device, sp, bsdEventDriveStatus); + send_blocked_status(dev, sp); if (dev->can_append()) { @@ -346,6 +387,8 @@ static void list_devices(JCR *jcr, STATUS_PKT *sp, const char *devicenames) len = Mmsg(msg, _("\nDevice \"%s\" is not open or does not exist.\n"), device->name()); sendit(msg, len, sp); } + + get_device_specific_status(device, sp); } if (!sp->api) { @@ -452,25 +495,6 @@ static void list_status_header(STATUS_PKT *sp) } } -static void trigger_device_status_hook(JCR *jcr, - DEVRES *device, - STATUS_PKT *sp, - bsdEventType eventType) -{ - bsdDevStatTrig dst; - - dst.device = device; - dst.status = get_pool_memory(PM_MESSAGE); - dst.status_length = 0; - - if (generate_plugin_event(jcr, eventType, &dst) == bRC_OK) { - if (dst.status_length > 0) { - sendit(dst.status, dst.status_length, sp); - } - } - free_pool_memory(dst.status); -} - static void send_blocked_status(DEVICE *dev, STATUS_PKT *sp) { int len; From 456512026194ad2c4a9774bb7d5394cbdbecd7bb Mon Sep 17 00:00:00 2001 From: Marco van Wieringen Date: Sat, 1 Jul 2017 23:22:45 +0200 Subject: [PATCH 12/46] stored: Implement object device chunked volumes. As some of the REST protocols only allow you to write blobs in total we now offer a chunked object store device type in which the volumes are chunked into pieces and these pieces are flushed to the backing store either on close or when we reach the maximum size of the chunk and need a new one. We also implemented the reading of such chunked devices. This runs on top of Scality's libdroplet with some local fixes and changes. 
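
As a toy illustration of the chunking scheme described above (this is not
chunked_device.c; the 10 MB chunk size and all names are assumptions made for
the example), a byte offset inside a volume maps to a chunk number plus an
offset within that chunk, and the in-memory chunk is flushed to the backing
store when it fills up or when the volume is closed:

#include <inttypes.h>
#include <stdio.h>

#define CHUNK_SIZE ((uint64_t)10 * 1024 * 1024)   /* Illustrative 10 MB chunk size. */

int main(void)
{
   uint64_t volume_offset = 123456789;             /* Some byte offset inside the volume. */
   uint64_t chunk = volume_offset / CHUNK_SIZE;    /* Which chunk holds this offset. */
   uint64_t offset_in_chunk = volume_offset % CHUNK_SIZE;

   printf("volume offset %" PRIu64 " lands in chunk %" PRIu64 " at offset %" PRIu64 "\n",
          volume_offset, chunk, offset_in_chunk);

   /*
    * The in-memory chunk buffer is flushed to the backing store either when
    * offset_in_chunk reaches CHUNK_SIZE (the chunk is full and the next chunk
    * is started) or when the volume is closed with a partially filled chunk.
    */
   return 0;
}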
--- autoconf/configure.in | 12 +- src/lib/Makefile.in | 12 +- src/lib/cbuf.c | 35 +- src/lib/cbuf.h | 13 +- src/lib/ordered_cbuf.c | 409 ++++++ src/lib/ordered_cbuf.h | 94 ++ src/stored/Makefile.in | 1 + src/stored/backends/Makefile.in | 12 +- src/stored/backends/chunked_device.c | 1095 +++++++++++++++++ src/stored/backends/chunked_device.h | 156 +++ src/stored/backends/object_store_device.c | 803 ++++++++---- src/stored/backends/object_store_device.h | 35 +- src/stored/backends/unix_tape_device.c | 10 + src/stored/dev.c | 1 + src/win32/stored/backends/win32_tape_device.c | 10 + 15 files changed, 2442 insertions(+), 256 deletions(-) create mode 100644 src/lib/ordered_cbuf.c create mode 100644 src/lib/ordered_cbuf.h create mode 100644 src/stored/backends/chunked_device.c create mode 100644 src/stored/backends/chunked_device.h diff --git a/autoconf/configure.in b/autoconf/configure.in index e48f550fdb7..c4f37c2ebbc 100644 --- a/autoconf/configure.in +++ b/autoconf/configure.in @@ -888,9 +888,10 @@ AC_HEADER_DIRENT AC_CHECK_HEADER(glob.h, [AC_DEFINE(HAVE_GLOB_H, 1, [Define to 1 if you have the header file.])] , ) AC_CHECK_HEADER(poll.h, [AC_DEFINE(HAVE_POLL_H, 1, [Define to 1 if you have the header file.])] , ) AC_CHECK_HEADER(sys/poll.h, [AC_DEFINE(HAVE_SYS_POLL_H, 1, [Define to 1 if you have the header file.])] , ) +AC_CHECK_HEADER(sys/mman.h, [AC_DEFINE(HAVE_SYS_MMAN_H, 1, [Define to 1 if you have the header file.])] , ) AC_CHECK_FUNCS(glob strcasecmp select poll setenv putenv tcgetattr) AC_CHECK_FUNCS(lstat lchown lchmod utimes lutimes futimes futimens fchmod fchown) -AC_CHECK_FUNCS(nanosleep nl_langinfo) +AC_CHECK_FUNCS(mmap nanosleep nl_langinfo) AC_CHECK_HEADERS(varargs.h) @@ -4542,14 +4543,18 @@ AC_SUBST_FILE(DEBIAN_CONTROL_STORAGE_PYTHON_PLUGIN) AC_SUBST_FILE(DEBIAN_CONTROL_DIRECTOR_PYTHON_PLUGIN) dnl build a list of storage backends we need to build. 
+BUILD_SD_BACKENDS="" if test x$use_libtool != xno; then - BUILD_SD_BACKENDS="libbareossd-fifo.la libbareossd-gentape.la libbareossd-tape.la" + BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-fifo.la" + BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-gentape.la" + BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-tape.la" if test X"$have_glusterfs" = "Xyes" ; then BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-gfapi.la" fi if test X"$have_droplet" = "Xyes" ; then + BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-chunked.la" BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-object.la" fi @@ -4564,13 +4569,10 @@ if test x$use_libtool != xno; then if test X"$have_elasto" = "Xyes" ; then BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-elasto.la" fi -else - BUILD_SD_BACKENDS="" fi AC_SUBST(BUILD_SD_BACKENDS) - dnl Insanity check if test "x${subsysdir}" = "x${sbindir}" ; then echo " " diff --git a/src/lib/Makefile.in b/src/lib/Makefile.in index 32a51de7e28..5e0f97290e8 100644 --- a/src/lib/Makefile.in +++ b/src/lib/Makefile.in @@ -44,11 +44,11 @@ INCLUDE_FILES = ../include/baconfig.h ../include/bareos.h \ bsock_tcp.h bsock_udt.h bsr.h btime.h btimers.h cbuf.h \ crypto.h crypto_cache.h devlock.h dlist.h fnmatch.h \ guid_to_name.h htable.h ini.h lex.h lib.h lockmgr.h \ - md5.h mem_pool.h message.h mntent_cache.h parse_conf.h \ - plugins.h protos.h queue.h rblist.h runscript.h rwlock.h \ - scsi_crypto.h scsi_lli.h scsi_tapealert.h sellist.h \ - serial.h sha1.h smartall.h status.h tls.h tree.h var.h \ - waitq.h watchdog.h workq.h + md5.h mem_pool.h message.h mntent_cache.h ordered_cbuf.h \ + parse_conf.h plugins.h protos.h queue.h rblist.h \ + runscript.h rwlock.h scsi_crypto.h scsi_lli.h \ + scsi_tapealert.h sellist.h serial.h sha1.h smartall.h \ + status.h tls.h tree.h var.h waitq.h watchdog.h workq.h # # libbareos @@ -61,7 +61,7 @@ LIBBAREOS_SRCS = address_conf.c alist.c attr.c attribs.c base64.c \ crypto_cache.c crypto_gnutls.c crypto_none.c crypto_nss.c \ crypto_openssl.c crypto_wrap.c daemon.c devlock.c dlist.c \ edit.c fnmatch.c guid_to_name.c hmac.c htable.c jcr.c json.c \ - lockmgr.c md5.c mem_pool.c message.c mntent_cache.c \ + lockmgr.c md5.c mem_pool.c message.c mntent_cache.c ordered_cbuf.c \ output_formatter.c passphrase.c path_list.c plugins.c poll.c \ priv.c queue.c rblist.c runscript.c rwlock.c scan.c scsi_crypto.c \ scsi_lli.c scsi_tapealert.c sellist.c serial.c sha1.c signal.c \ diff --git a/src/lib/cbuf.c b/src/lib/cbuf.c index cf974676cce..59a14da3f14 100644 --- a/src/lib/cbuf.c +++ b/src/lib/cbuf.c @@ -31,7 +31,7 @@ /* * Initialize a new circular buffer. */ -int circbuf::init() +int circbuf::init(int capacity) { if (pthread_mutex_init(&m_lock, NULL) != 0) { return -1; @@ -51,7 +51,11 @@ int circbuf::init() m_next_in = 0; m_next_out = 0; m_size = 0; - m_capacity = QSIZE; + m_capacity = capacity; + if (m_data) { + free(m_data); + } + m_data = (void **)malloc(m_capacity * sizeof(void *)); return 0; } @@ -64,6 +68,10 @@ void circbuf::destroy() pthread_cond_destroy(&m_notempty); pthread_cond_destroy(&m_notfull); pthread_mutex_destroy(&m_lock); + if (m_data) { + free(m_data); + m_data = NULL; + } } /* @@ -86,9 +94,9 @@ int circbuf::enqueue(void *data) m_next_in %= m_capacity; /* - * Let a waiting consumer know there is data. + * Let any waiting consumer know there is data. 
*/ - pthread_cond_signal(&m_notempty); + pthread_cond_broadcast(&m_notempty); pthread_mutex_unlock(&m_lock); @@ -100,7 +108,7 @@ int circbuf::enqueue(void *data) */ void *circbuf::dequeue() { - void *data; + void *data = NULL; if (pthread_mutex_lock(&m_lock) != 0) { return NULL; @@ -117,10 +125,7 @@ void *circbuf::dequeue() * When we are requested to flush and there is no data left return NULL. */ if (empty() && m_flush) { - m_flush = false; - pthread_mutex_unlock(&m_lock); - - return NULL; + goto bail_out; } data = m_data[m_next_out++]; @@ -128,10 +133,11 @@ void *circbuf::dequeue() m_next_out %= m_capacity; /* - * Let a waiting producer know there is room. + * Let all waiting producers know there is room. */ - pthread_cond_signal(&m_notfull); + pthread_cond_broadcast(&m_notfull); +bail_out: pthread_mutex_unlock(&m_lock); return data; @@ -169,12 +175,15 @@ int circbuf::flush() return -1; } + /* + * Set the flush flag. + */ m_flush = true; /* - * Let a waiting consumer know there will be no more data. + * Let all waiting consumers know there will be no more data. */ - pthread_cond_signal(&m_notempty); + pthread_cond_broadcast(&m_notempty); pthread_mutex_unlock(&m_lock); diff --git a/src/lib/cbuf.h b/src/lib/cbuf.h index 8b4ebb4dba1..373bb88d990 100644 --- a/src/lib/cbuf.h +++ b/src/lib/cbuf.h @@ -29,6 +29,7 @@ #define QSIZE 10 /* # of pointers in the queue */ class circbuf : public SMARTALLOC { +private: int m_size; int m_next_in; int m_next_out; @@ -37,12 +38,12 @@ class circbuf : public SMARTALLOC { pthread_mutex_t m_lock; /* Lock the structure */ pthread_cond_t m_notfull; /* Full -> not full condition */ pthread_cond_t m_notempty; /* Empty -> not empty condition */ - void *m_data[QSIZE]; /* Circular buffer of pointers */ + void **m_data; /* Circular buffer of pointers */ public: - circbuf(); + circbuf(int capacity = QSIZE); ~circbuf(); - int init(); + int init(int capacity); void destroy(); int enqueue(void *data); void *dequeue(); @@ -50,15 +51,17 @@ class circbuf : public SMARTALLOC { int flush(); bool full() { return m_size == m_capacity; }; bool empty() { return m_size == 0; }; + bool is_flushing() { return m_flush; }; int capacity() const { return m_capacity; }; }; /* * Constructor */ -inline circbuf::circbuf() +inline circbuf::circbuf(int capacity) { - init(); + m_data = NULL; + init(capacity); } /* diff --git a/src/lib/ordered_cbuf.c b/src/lib/ordered_cbuf.c new file mode 100644 index 00000000000..9b76300b533 --- /dev/null +++ b/src/lib/ordered_cbuf.c @@ -0,0 +1,409 @@ +/* + BAREOS® - Backup Archiving REcovery Open Sourced + + Copyright (C) 2016-2017 Planets Communications B.V. + + This program is Free Software; you can redistribute it and/or + modify it under the terms of version three of the GNU Affero General Public + License as published by the Free Software Foundation and included + in the file LICENSE. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. +*/ +/* + * Marco van Wieringen, December 2016. + */ + +/* + * Ordered Circular buffer used for producer/consumer problem with pthreads. 
+ */ +#include "bareos.h" +#include "ordered_cbuf.h" + +/* + * Initialize a new ordered circular buffer. + */ +int ordered_circbuf::init(int capacity) +{ + struct ocbuf_item *item = NULL; + + if (pthread_mutex_init(&m_lock, NULL) != 0) { + return -1; + } + + if (pthread_cond_init(&m_notfull, NULL) != 0) { + pthread_mutex_destroy(&m_lock); + return -1; + } + + if (pthread_cond_init(&m_notempty, NULL) != 0) { + pthread_cond_destroy(&m_notfull); + pthread_mutex_destroy(&m_lock); + return -1; + } + + m_size = 0; + m_capacity = capacity; + m_reserved = 0; + if (m_data) { + m_data->destroy(); + delete m_data; + } + m_data = New(dlist(item, &item->link)); + + return 0; +} + +/* + * Destroy a ordered circular buffer. + */ +void ordered_circbuf::destroy() +{ + pthread_cond_destroy(&m_notempty); + pthread_cond_destroy(&m_notfull); + pthread_mutex_destroy(&m_lock); + if (m_data) { + m_data->destroy(); + delete m_data; + } +} + +/* + * Enqueue a new item into the ordered circular buffer. + */ +void *ordered_circbuf::enqueue(void *data, + uint32_t data_size, + int compare(void *item1, void *item2), + void update(void *item1, void *item2), + bool use_reserved_slot, + bool no_signal) +{ + struct ocbuf_item *new_item, *item; + + if (pthread_mutex_lock(&m_lock) != 0) { + return NULL; + } + + /* + * See if we should use a reserved slot and there are actually slots reserved. + */ + if (!use_reserved_slot || !m_reserved) { + /* + * Wait while the buffer is full. + */ + while (full()) { + pthread_cond_wait(&m_notfull, &m_lock); + } + } + + /* + * Decrease the number of reserved slots if we should use a reserved slot. + * We do this even when we don't really add a new item to the ordered + * circular list to keep the reserved slot counting consistent. + */ + if (use_reserved_slot) { + m_reserved--; + } + + /* + * Binary insert the data into the ordered circular buffer. If the + * item returned is not our new_item it means there is already an + * entry with the same keys on the ordered circular list. We then + * just call the update function callback which should perform the + * right actions to update the already existing item with the new + * data in the new item. The compare function callback is used to binary + * insert the item at the right location in the ordered circular list. + */ + new_item = (struct ocbuf_item *)malloc(sizeof(struct ocbuf_item)); + new_item->data = data; + new_item->data_size = data_size; + + item = (struct ocbuf_item *)m_data->binary_insert(new_item, compare); + if (item == new_item) { + m_size++; + } else { + /* + * Update the data on the ordered circular list with the new data. + * e.g. replace the old with the new data but don't allocate a new + * item on the ordered circular list. + */ + update(item, new_item); + + /* + * Release the unused ocbuf_item. + */ + free(new_item); + + /* + * Update data to point to the data that was attached to the original ocbuf_item. + */ + data = item->data; + } + + /* + * See if we need to signal any workers that work is available or not. + */ + if (!no_signal) { + /* + * Let any waiting consumer know there is data. + */ + pthread_cond_broadcast(&m_notempty); + } + + pthread_mutex_unlock(&m_lock); + + /* + * Return the data that is current e.g. either the new data passed in or + * the already existing data on the ordered circular list. + */ + return data; +} + +/* + * Dequeue an item from the ordered circular buffer. 
+ */ +void *ordered_circbuf::dequeue(bool reserve_slot, + bool requeued, + struct timespec *ts, + int timeout) +{ + void *data = NULL; + struct ocbuf_item *item; + + if (pthread_mutex_lock(&m_lock) != 0) { + return NULL; + } + + /* + * Wait while there is nothing in the buffer + */ + while ((requeued || empty()) && !m_flush) { + /* + * The requeued state is only valid one time so clear it. + */ + requeued = false; + + /* + * See if we should block indefinitely or wake up + * after the given timer has expired and calculate + * the next time we need to wakeup. This way we check + * after the timer expired if there is work to be done + * this is something we need if the worker threads can + * put work back onto the circular queue and uses + * enqueue with the no_signal flag set. + */ + if (ts) { + pthread_cond_timedwait(&m_notempty, &m_lock, ts); + + /* + * See if there is really work to be done. + * We could be woken by the broadcast but some other iothread + * could take the work as we have to wait to reacquire the m_lock. + * Only one thread will be in the critical section and be able to + * hold the lock. + */ + if (empty() && !m_flush) { + struct timeval tv; + struct timezone tz; + + /* + * Calculate the next absolute timeout if we find + * out there is no work to be done. + */ + gettimeofday(&tv, &tz); + ts->tv_nsec = tv.tv_usec * 1000; + ts->tv_sec = tv.tv_sec + timeout; + + continue; + } + } else { + pthread_cond_wait(&m_notempty, &m_lock); + + /* + * See if there is really work to be done. + * We could be woken by the broadcast but some other iothread + * could take the work as we have to wait to reacquire the m_lock. + * Only one thread will be in the critical section and be able to + * hold the lock. + */ + if (empty() && !m_flush) { + continue; + } + } + } + + /* + * When we are requested to flush and there is no data left return NULL. + */ + if (empty() && m_flush) { + goto bail_out; + } + + /* + * Get the first item from the dlist and remove it. + */ + item = (struct ocbuf_item *)m_data->first(); + if (!item) { + goto bail_out; + } + + m_data->remove(item); + m_size--; + + /* + * Let all waiting producers know there is room. + */ + pthread_cond_broadcast(&m_notfull); + + /* + * Extract the payload and drop the placeholder. + */ + data = item->data; + free(item); + + /* + * Increase the reserved slot count when we are asked to reserve the slot. + */ + if (reserve_slot) { + m_reserved++; + } + +bail_out: + pthread_mutex_unlock(&m_lock); + + return data; +} + +/* + * Peek on the buffer for a certain item. + * We return a copy of the data on the ordered circular buffer. + * Any pointers in that data may become invallid after its returned + * to the calling function. As such you should not rely on the data. + */ +void *ordered_circbuf::peek(enum oc_peek_types type, + void *data, + int callback(void *item1, void *item2)) +{ + void *retval = NULL; + struct ocbuf_item *item; + + if (pthread_mutex_lock(&m_lock) != 0) { + return NULL; + } + + /* + * There is nothing to be seen on an empty ordered circular buffer. + */ + if (empty()) { + goto bail_out; + } + + /* + * Depending on the peek type start somewhere on the ordered list and + * walk forward or back. 
+ */ + switch (type) { + case PEEK_FIRST: + item = (struct ocbuf_item *)m_data->first(); + while (item) { + if (callback(item->data, data) == 0) { + retval = malloc(item->data_size); + memcpy(retval, item->data, item->data_size); + goto bail_out; + } + + item = (struct ocbuf_item *)m_data->next(item); + } + break; + case PEEK_LAST: + item = (struct ocbuf_item *)m_data->last(); + while (item) { + if (callback(item->data, data) == 0) { + retval = malloc(item->data_size); + memcpy(retval, item->data, item->data_size); + goto bail_out; + } + + item = (struct ocbuf_item *)m_data->prev(item); + } + break; + case PEEK_LIST: + item = (struct ocbuf_item *)m_data->first(); + while (item) { + callback(item->data, data); + item = (struct ocbuf_item *)m_data->next(item); + } + break; + default: + goto bail_out; + } + +bail_out: + pthread_mutex_unlock(&m_lock); + + return retval; +} + +/* + * Unreserve a slot which was reserved by dequeue(). + */ +int ordered_circbuf::unreserve_slot() +{ + int retval = -1; + + if (pthread_mutex_lock(&m_lock) != 0) { + goto bail_out; + } + + /* + * Make sure any slots are still reserved. Otherwise people + * are playing games and should pay the price for doing so. + */ + if (m_reserved) { + m_reserved--; + + /* + * Let all waiting producers know there is room. + */ + pthread_cond_broadcast(&m_notfull); + + retval = 0; + } + pthread_mutex_unlock(&m_lock); + +bail_out: + return retval; +} + +/* + * Flush the ordered circular buffer. + * Any waiting consumer will be wakened and will see we are in flush state. + */ +int ordered_circbuf::flush() +{ + if (pthread_mutex_lock(&m_lock) != 0) { + return -1; + } + + /* + * Set the flush flag. + */ + m_flush = true; + + /* + * Let all waiting consumers know there will be no more data. + */ + pthread_cond_broadcast(&m_notempty); + + pthread_mutex_unlock(&m_lock); + + return 0; +} diff --git a/src/lib/ordered_cbuf.h b/src/lib/ordered_cbuf.h new file mode 100644 index 00000000000..3279c14a774 --- /dev/null +++ b/src/lib/ordered_cbuf.h @@ -0,0 +1,94 @@ +/* + BAREOS® - Backup Archiving REcovery Open Sourced + + Copyright (C) 2016-2017 Planets Communications B.V. + + This program is Free Software; you can redistribute it and/or + modify it under the terms of version three of the GNU Affero General Public + License as published by the Free Software Foundation and included + in the file LICENSE. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. +*/ +/* + * Marco van Wieringen, December 2016. + */ + +/* + * Ordered Circular buffer used for producer/consumer problem with pthread. 
+ */ + +#define OQSIZE 10 /* # of pointers in the queue */ + +enum oc_peek_types { + PEEK_FIRST = 0, + PEEK_LAST, + PEEK_LIST +}; + +struct ocbuf_item { + dlink link; + uint32_t data_size; + void *data; +}; + +class ordered_circbuf : public SMARTALLOC { +private: + int m_size; + int m_capacity; + int m_reserved; + bool m_flush; + pthread_mutex_t m_lock; /* Lock the structure */ + pthread_cond_t m_notfull; /* Full -> not full condition */ + pthread_cond_t m_notempty; /* Empty -> not empty condition */ + dlist *m_data; /* Circular buffer of pointers */ + +public: + ordered_circbuf(int capacity = OQSIZE); + ~ordered_circbuf(); + int init(int capacity); + void destroy(); + void *enqueue(void *data, + uint32_t data_size, + int compare(void *item1, void *item2), + void update(void *item1, void *item2), + bool use_reserved_slot = false, + bool no_signal = false); + void *dequeue(bool reserve_slot = false, + bool requeued = false, + struct timespec *ts = NULL, + int timeout = 300); + void *peek(enum oc_peek_types type, + void *data, + int callback(void *item1, void *item2)); + int unreserve_slot(); + int flush(); + bool full() { return m_size == (m_capacity - m_reserved); }; + bool empty() { return m_size == 0; }; + bool is_flushing() { return m_flush; }; + int capacity() const { return m_capacity; }; +}; + +/* + * Constructor + */ +inline ordered_circbuf::ordered_circbuf(int capacity) +{ + init(capacity); +} + +/* + * Destructor + */ +inline ordered_circbuf::~ordered_circbuf() +{ + destroy(); +} diff --git a/src/stored/Makefile.in b/src/stored/Makefile.in index e4d431bb0c2..8a9afa7604f 100644 --- a/src/stored/Makefile.in +++ b/src/stored/Makefile.in @@ -28,6 +28,7 @@ first_rule: all dummy: AVAILABLE_DEVICE_API_SRCS = cephfs_device.c \ + chunked_device.c \ elasto_device.c \ gfapi_device.c \ object_store_device.c \ diff --git a/src/stored/backends/Makefile.in b/src/stored/backends/Makefile.in index dba7da8c074..62c20d9dd56 100644 --- a/src/stored/backends/Makefile.in +++ b/src/stored/backends/Makefile.in @@ -32,6 +32,9 @@ RADOS_LIBS = @RADOS_STRIPER_LIBS@ @RADOS_LIBS@ CHEPHFS_SRCS = cephfs_device.c CHEPHFS_LOBJS = $(CHEPHFS_SRCS:.c=.lo) +CHUNKED_SRCS = chunked_device.c +CHUNKED_LOBJS = $(CHUNKED_SRCS:.c=.lo) + ELASTO_SRCS = elasto_device.c ELASTO_LOBJS = $(ELASTO_SRCS:.c=.lo) @@ -110,6 +113,11 @@ libbareossd-cephfs.la: Makefile $(CHEPHFS_LOBJS) $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -L../../lib -o $@ $(CHEPHFS_LOBJS) -export-dynamic -rpath $(backenddir) -release $(LIBBAREOSSD_LT_RELEASE) \ -soname libbareossd-cephfs-$(LIBBAREOSSD_LT_RELEASE).so $(CEPHFS_LIBS) -lbareos +libbareossd-chunked.la: Makefile $(CHUNKED_LOBJS) + @echo "Making $@ ..." + $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -L../../lib -o $@ $(CHUNKED_LOBJS) -export-dynamic -rpath $(backenddir) -release $(LIBBAREOSSD_LT_RELEASE) \ + -soname libbareossd-chunked-$(LIBBAREOSSD_LT_RELEASE).so -lbareos + libbareossd-elasto.la: Makefile $(ELASTO_LOBJS) @echo "Making $@ ..." 
$(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -L../../lib -o $@ $(ELASTO_LOBJS) -export-dynamic -rpath $(backenddir) -release $(LIBBAREOSSD_LT_RELEASE) \ @@ -120,10 +128,10 @@ libbareossd-gfapi.la: Makefile $(GFAPI_LOBJS) $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -L../../lib -o $@ $(GFAPI_LOBJS) -export-dynamic -rpath $(backenddir) -release $(LIBBAREOSSD_LT_RELEASE) \ -soname libbareossd-gfapi-$(LIBBAREOSSD_LT_RELEASE).so $(GLUSTER_LIBS) -lbareos -libbareossd-object.la: Makefile $(OBJECT_LOBJS) +libbareossd-object.la: Makefile libbareossd-chunked.la $(OBJECT_LOBJS) @echo "Making $@ ..." $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -L../../lib -o $@ $(OBJECT_LOBJS) -export-dynamic -rpath $(backenddir) -release $(LIBBAREOSSD_LT_RELEASE) \ - -soname libbareossd-object-$(LIBBAREOSSD_LT_RELEASE).so $(DROPLET_LIBS) -lbareos + -soname libbareossd-object-$(LIBBAREOSSD_LT_RELEASE).so $(DROPLET_LIBS) libbareossd-chunked.la -lbareos libbareossd-rados.la: Makefile $(RADOS_LOBJS) @echo "Making $@ ..." diff --git a/src/stored/backends/chunked_device.c b/src/stored/backends/chunked_device.c new file mode 100644 index 00000000000..0f256861bc4 --- /dev/null +++ b/src/stored/backends/chunked_device.c @@ -0,0 +1,1095 @@ +/* + BAREOS® - Backup Archiving REcovery Open Sourced + + Copyright (C) 2015-2017 Planets Communications B.V. + + This program is Free Software; you can redistribute it and/or + modify it under the terms of version three of the GNU Affero General Public + License as published by the Free Software Foundation and included + in the file LICENSE. + + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. +*/ +/* + * Chunked volume device abstraction. + * + * Marco van Wieringen, February 2015 + */ + +#include "bareos.h" + +#if defined(HAVE_OBJECTSTORE) +#include "stored.h" +#include "chunked_device.h" + +#ifdef HAVE_MMAP +#ifdef HAVE_SYS_MMAN_H +#include +#endif +#endif + +/* + * This implements a device abstraction that provides so called chunked + * volumes. These chunks are kept in memory and flushed to the backing + * store when requested. This class fully abstracts the chunked volumes + * for the upper level device. The stacking for this device type is: + * + * :: + * | + * v + * chunked_device:: + * | + * v + * DEVICE:: + * + * The public interfaces exported from this device are: + * + * setup_chunk() - Setup a chunked volume for reading or writing. + * read_chunked() - Read a chunked volume. + * write_chunked() - Write a chunked volume. + * close_chunk() - Close a chunked volume. + * truncate_chunked_volume() - Truncate a chunked volume. + * chunked_volume_size() - Get the current size of a volume. + * load_chunk() - Make sure we have the right chunk in memory. + * + * It also demands that the inheriting class implements the + * following methods: + * + * flush_remote_chunk() - Flush a chunk to the remote backing store. + * read_remote_chunk() - Read a chunk from the remote backing store. + * chunked_remote_volume_size - Return the current size of a volume. + * truncate_remote_chunked_volume() - Truncate a chunked volume on the + * remote backing store. 
+ */ + +/* + * Actual thread runner that processes IO request from circular buffer. + */ +static void *io_thread(void *data) +{ + char ed1[50]; + chunked_device *dev = (chunked_device *)data; + + /* + * Dequeue from the circular buffer until we are done. + */ + while (1) { + if (!dev->dequeue_chunk()) { + break; + } + } + + Dmsg1(100, "Stopping IO-thread threadid=%s\n", + edit_pthread(pthread_self(), ed1, sizeof(ed1))); + + return NULL; +} + +/* + * Allocate a new chunk buffer. + */ +char *chunked_device::allocate_chunkbuffer() +{ + char *buffer = NULL; + +#ifdef HAVE_MMAP + if (m_use_mmap) { + buffer = (char *)::mmap(NULL, m_current_chunk->chunk_size, + (PROT_READ | PROT_WRITE), + (MAP_SHARED | MAP_ANONYMOUS), + -1, 0); + Dmsg1(100, "Mapped %ld bytes for chunk buffer\n", m_current_chunk->chunk_size); + } else { +#endif + buffer = (char *)malloc(m_current_chunk->chunk_size); +#ifdef HAVE_MMAP + } +#endif + + Dmsg2(100, "New allocated buffer of %d bytes at %p\n", m_current_chunk->chunk_size, buffer); + + return buffer; +} + +/* + * Free a chunk buffer. + */ +void chunked_device::free_chunkbuffer(char *buffer) +{ + Dmsg2(100, "Freeing buffer of %d bytes at %p\n", m_current_chunk->chunk_size, buffer); + +#ifdef HAVE_MMAP + if (m_use_mmap) { + ::munmap(buffer, m_current_chunk->chunk_size); + Dmsg1(100, "Unmapped %ld bytes used as chunk buffer\n", m_current_chunk->chunk_size); + } else { +#endif + free(buffer); + + /* + * As we released a big memory chunk let the garbage collector run. + */ + garbage_collect_memory(); +#ifdef HAVE_MMAP + } +#endif +} + +/* + * Free a chunk_io_request. + */ +void chunked_device::free_chunk_io_request(chunk_io_request *request) +{ + Dmsg2(100, "Freeing chunk io request of %d bytes at %p\n", sizeof(chunk_io_request), request); + + if (request->release) { + free_chunkbuffer(request->buffer); + } + free((void *)request->volname); + free(request); +} + +/* + * Start the io-threads that are used for uploading. + */ +bool chunked_device::start_io_threads() +{ + char ed1[50]; + uint8_t thread_nr; + pthread_t thread_id; + thread_handle *handle; + + /* + * Create a new ordered circular buffer for exchanging chunks between + * the producer (the storage driver) and multiple consumers (io-threads). + */ + if (m_io_slots) { + m_cb = New(ordered_circbuf(m_io_threads * m_io_slots)); + } else { + m_cb = New(ordered_circbuf(m_io_threads * OQSIZE)); + } + + /* + * Start all IO threads and keep track of their thread ids in m_thread_ids. + */ + if (!m_thread_ids) { + m_thread_ids = New(alist(10, owned_by_alist)); + } + + for (thread_nr = 1; thread_nr <= m_io_threads; thread_nr++) { + if (pthread_create(&thread_id, NULL, io_thread, (void *)this)) { + return false; + } + + handle = (thread_handle *)malloc(sizeof(thread_handle)); + memset(handle, 0, sizeof(thread_handle)); + handle->type = WAIT_JOIN_THREAD; + memcpy(&handle->thread_id, &thread_id, sizeof(pthread_t)); + m_thread_ids->append(handle); + + Dmsg1(100, "Started new IO-thread threadid=%s\n", + edit_pthread(thread_id, ed1, sizeof(ed1))); + } + + m_io_threads_started = true; + + return true; +} + +/* + * Stop the io-threads that are used for uploading. + */ +void chunked_device::stop_threads() +{ + char ed1[50]; + thread_handle *handle; + + /* + * Tell all IO threads that we flush the circular buffer. + * As such they will get a NULL chunk_io_request back and exit. + */ + m_cb->flush(); + + /* + * Wait for all threads to exit. 
+ */ + if (m_thread_ids) { + foreach_alist(handle, m_thread_ids) { + switch (handle->type) { + case WAIT_CANCEL_THREAD: + Dmsg1(100, "Canceling thread with threadid=%s\n", + edit_pthread(handle->thread_id, ed1, sizeof(ed1))); + pthread_cancel(handle->thread_id); + break; + case WAIT_JOIN_THREAD: + Dmsg1(100, "Waiting to join with threadid=%s\n", + edit_pthread(handle->thread_id, ed1, sizeof(ed1))); + pthread_join(handle->thread_id, NULL); + break; + default: + break; + } + } + + m_thread_ids->destroy(); + delete m_thread_ids; + m_thread_ids = NULL; + } +} + +/* + * Call back function for comparing two chunk_io_requests. + */ +static int compare_chunk_io_request(void *item1, void *item2) +{ + ocbuf_item *ocbuf1 = (ocbuf_item *)item1; + ocbuf_item *ocbuf2 = (ocbuf_item *)item2; + chunk_io_request *chunk1 = (chunk_io_request *)ocbuf1->data; + chunk_io_request *chunk2 = (chunk_io_request *)ocbuf2->data; + + /* + * Same volume name ? + */ + if (bstrcmp(chunk1->volname, chunk2->volname)) { + /* + * Compare on chunk number. + */ + if (chunk1->chunk == chunk2->chunk) { + return 0; + } else { + return (chunk1->chunk < chunk2->chunk) ? -1 : 1; + } + } else { + return strcmp(chunk1->volname, chunk2->volname); + } +} + +/* + * Call back function for updating two chunk_io_requests. + */ +static void update_chunk_io_request(void *item1, void *item2) +{ + ocbuf_item *ocbuf1 = (ocbuf_item *)item1; + ocbuf_item *ocbuf2 = (ocbuf_item *)item2; + chunk_io_request *chunk1 = (chunk_io_request *)ocbuf1->data; + chunk_io_request *chunk2 = (chunk_io_request *)ocbuf2->data; + + /* + * See if the new chunk_io_request has more bytes then + * the chunk_io_request currently on the ordered circular + * buffer. We can only have multiple chunk_io_requests for + * the same chunk of a volume when a chunk was not fully + * filled by one backup Job and a next one writes data to + * the chunk before its being flushed to backing store. This + * means all pointers are the same only the wbuflen and the + * release flag of the chunk_io_request differ. So we only + * copy those two fields and not the others. + */ + if (chunk2->buffer == chunk1->buffer && + chunk2->wbuflen > chunk1->wbuflen) { + chunk1->wbuflen = chunk2->wbuflen; + chunk1->release = chunk2->release; + } + chunk2->release = false; +} + +/* + * Enqueue a chunk flush request onto the ordered circular buffer. + */ +bool chunked_device::enqueue_chunk(chunk_io_request *request) +{ + chunk_io_request *new_request, + *enqueued_request; + + Dmsg2(100, "Enqueueing chunk %d of volume %s\n", request->chunk, request->volname); + + if (!m_io_threads_started) { + if (!start_io_threads()) { + return false; + } + } + + new_request = (chunk_io_request *)malloc(sizeof(chunk_io_request)); + memset(new_request, 0, sizeof(chunk_io_request)); + new_request->volname = bstrdup(request->volname); + new_request->chunk = request->chunk; + new_request->buffer = request->buffer; + new_request->wbuflen = request->wbuflen; + new_request->release = request->release; + + Dmsg2(100, "Allocated chunk io request of %d bytes at %p\n", sizeof(chunk_io_request), new_request); + + /* + * Enqueue the item onto the ordered circular buffer. + * This returns either the same request as we passed + * in or the previous flush request for the same chunk. 
+ */ + enqueued_request = (chunk_io_request *)m_cb->enqueue(new_request, + sizeof(chunk_io_request), + compare_chunk_io_request, + update_chunk_io_request, + false, /* use_reserved_slot */ + false /* no_signal */); + + /* + * Compare the return value from the enqueue. + */ + if (enqueued_request && enqueued_request != new_request) { + free_chunk_io_request(new_request); + } + + return (enqueued_request) ? true : false; +} + +/* + * Dequeue a chunk flush request from the ordered circular buffer and process it. + */ +bool chunked_device::dequeue_chunk() +{ + char ed1[50]; + struct timeval tv; + struct timezone tz; + struct timespec ts; + bool requeued = false; + chunk_io_request *new_request; + + /* + * Loop while we are not done either due to the ordered circular buffer being flushed + * some fatal error or successfully dequeueing a chunk flush request. + */ + while (1) { + /* + * See if we are in the flushing state then we just return and exit the io-thread. + */ + if (m_cb->is_flushing()) { + return false; + } + + /* + * Calculate the next absolute timeout if we find out there is no work to be done. + */ + gettimeofday(&tv, &tz); + ts.tv_nsec = tv.tv_usec * 1000; + ts.tv_sec = tv.tv_sec + DEFAULT_RECHECK_INTERVAL; + + /* + * Dequeue the next item from the ordered circular buffer and reserve the slot as we + * might need to put this item back onto the ordered circular buffer if we fail to + * flush it to the remote backing store. Also let the dequeue wake up every + * DEFAULT_RECHECK_INTERVAL seconds to retry failed previous uploads. + */ + new_request = (chunk_io_request *)m_cb->dequeue(true, /* reserve_slot we may need to enqueue the request */ + requeued, /* request is requeued due to failure ? */ + &ts, DEFAULT_RECHECK_INTERVAL); + if (!new_request) { + return false; + } + + Dmsg3(100, "Flushing chunk %d of volume %s by thread %s\n", + new_request->chunk, new_request->volname, + edit_pthread(pthread_self(), ed1, sizeof(ed1))); + + if (!flush_remote_chunk(new_request)) { + chunk_io_request *enqueued_request; + + /* + * We failed to flush the chunk to the backing store + * so enqueue it again using the reserved slot by dequeue() + * but don't signal the workers otherwise we would try uploading + * the same chunk again and again by different io-threads. + * As we set the requeued flag to the dequeue method on the ordered circular buffer + * we will not try dequeueing any new item either until a new item is put + * onto the ordered circular buffer or after the retry interval has expired. + */ + Dmsg2(100, "Enqueueing chunk %d of volume %s for retry of upload later\n", + new_request->chunk, new_request->volname); + + /* + * Enqueue the item onto the ordered circular buffer. + * This returns either the same request as we passed + * in or the previous flush request for the same chunk. + */ + enqueued_request = (chunk_io_request *)m_cb->enqueue(new_request, + sizeof(chunk_io_request), + compare_chunk_io_request, + update_chunk_io_request, + true, /* use_reserved_slot */ + true /* no_signal */); + /* + * See if the enqueue succeeded. + */ + if (!enqueued_request) { + return false; + } + + /* + * Compare the return value from the enqueue against our new_request. + * If it is different there was already a chunk io request for the + * same chunk on the ordered circular buffer. + */ + if (enqueued_request != new_request) { + free_chunk_io_request(new_request); + } + + requeued = true; + continue; + } + + /* + * Unreserve the slot on the ordered circular buffer reserved by dequeue(). 
+ */ + m_cb->unreserve_slot(); + + /* + * Processed the chunk so clean it up now. + */ + free_chunk_io_request(new_request); + + return true; + } +} + +/* + * Internal method for flushing a chunk to the backing store. + * The retry logic is in the io-threads but if those are not + * used we give this one try and otherwise drop the chunk and + * return an IO error to the upper level callers. That way the + * volume will go into error. + */ +bool chunked_device::flush_chunk(bool release_chunk, bool move_to_next_chunk) +{ + bool retval = false; + chunk_io_request request; + + /* + * Calculate in which chunk we are currently. + */ + request.chunk = m_current_chunk->start_offset / m_current_chunk->chunk_size; + request.volname = m_current_volname; + request.buffer = m_current_chunk->buffer; + request.wbuflen = m_current_chunk->buflen; + request.release = release_chunk; + + if (m_io_threads) { + retval = enqueue_chunk(&request); + } else { + retval = flush_remote_chunk(&request); + } + + /* + * Clear the need flushing flag. + */ + m_current_chunk->need_flushing = false; + + /* + * Change to the next chunk ? + */ + if (move_to_next_chunk) { + /* + * If we enqueued the data we need to allocate a new buffer. + */ + if (m_io_threads) { + m_current_chunk->buffer = allocate_chunkbuffer(); + } + m_current_chunk->start_offset += m_current_chunk->chunk_size; + m_current_chunk->end_offset = m_current_chunk->start_offset + (m_current_chunk->chunk_size - 1); + m_current_chunk->buflen = 0; + } else { + /* + * If we enqueued the data we need to allocate a new buffer. + */ + if (release_chunk && m_io_threads) { + m_current_chunk->buffer = NULL; + } + } + + if (!retval) { + Dmsg1(100, "%s", errmsg); + } + + return retval; +} + +/* + * Internal method for reading a chunk from the backing store. + */ +bool chunked_device::read_chunk() +{ + chunk_io_request request; + + /* + * Calculate in which chunk we are currently. + */ + request.chunk = m_current_chunk->start_offset / m_current_chunk->chunk_size; + request.volname = m_current_volname; + request.buffer = m_current_chunk->buffer; + request.wbuflen = m_current_chunk->chunk_size; + request.rbuflen = &m_current_chunk->buflen; + request.release = false; + + m_current_chunk->end_offset = m_current_chunk->start_offset + (m_current_chunk->chunk_size - 1); + + if (!read_remote_chunk(&request)) { + /* + * If the chunk doesn't exist on the backing store it has a size of 0 bytes. + */ + m_current_chunk->buflen = 0; + return false; + } + + return true; +} + +/* + * Setup a chunked volume for reading or writing. + */ +void chunked_device::setup_chunk(int flags) +{ + if (!m_current_chunk) { + m_current_chunk = (chunk_descriptor *)malloc(sizeof(chunk_descriptor)); + memset(m_current_chunk, 0, sizeof(chunk_descriptor)); + if (m_chunk_size > DEFAULT_CHUNK_SIZE) { + m_current_chunk->chunk_size = m_chunk_size; + } else { + m_current_chunk->chunk_size = DEFAULT_CHUNK_SIZE; + } + m_current_chunk->start_offset = -1; + m_current_chunk->end_offset = -1; + } + + /* + * Reopen of a device. + */ + if (m_current_chunk->opened) { + /* + * Invalidate chunk. + */ + m_current_chunk->buflen = 0; + m_current_chunk->start_offset = -1; + m_current_chunk->end_offset = -1; + } + + if (flags & O_RDWR) { + m_current_chunk->writing = true; + } + + m_current_chunk->opened = true; + m_current_chunk->chunk_setup = false; + + /* + * We need to limit the maximum size of a chunked volume to MAX_CHUNKS * chunk_size). 
+ */ + if (max_volume_size == 0 || max_volume_size > (uint64_t)(MAX_CHUNKS * m_current_chunk->chunk_size)) { + max_volume_size = MAX_CHUNKS * m_current_chunk->chunk_size; + } + + /* + * On open set begin offset to 0. + */ + m_offset = 0; + + /* + * On open we are no longer at the End of the Media. + */ + m_end_of_media = false; + + /* + * Keep track of the volume currently mounted. + */ + if (m_current_volname) { + free(m_current_volname); + } + + m_current_volname = bstrdup(getVolCatName()); +} + +/* + * Read a chunked volume. + */ +ssize_t chunked_device::read_chunked(int fd, void *buffer, size_t count) +{ + ssize_t retval = 0; + + if (m_current_chunk->opened) { + ssize_t wanted_offset; + ssize_t bytes_left; + + /* + * Shortcut logic see if m_end_of_media is set then we are at the End of the Media + */ + if (m_end_of_media) { + goto bail_out; + } + + /* + * If we are starting reading without the chunk being setup it means we + * are start reading at the beginning of the file otherwise the d_lseek method + * would have read in the correct chunk. + */ + if (!m_current_chunk->chunk_setup) { + m_current_chunk->start_offset = 0; + + /* + * See if we have to allocate a new buffer. + */ + if (!m_current_chunk->buffer) { + m_current_chunk->buffer = allocate_chunkbuffer(); + } + + if (!read_chunk()) { + retval = -1; + goto bail_out; + } + m_current_chunk->chunk_setup = true; + } + + /* + * See if we can fulfill the wanted read from the current chunk. + */ + if (m_current_chunk->start_offset <= m_offset && + m_current_chunk->end_offset >= (boffset_t)((m_offset + count) - 1)) { + wanted_offset = (m_offset % m_current_chunk->chunk_size); + + bytes_left = MIN((ssize_t)count, (m_current_chunk->buflen - wanted_offset)); + Dmsg2(200, "Reading %d bytes at offset %d from chunk buffer\n", bytes_left, wanted_offset); + + if (bytes_left < 0) { + retval = -1; + goto bail_out; + } + + if (bytes_left > 0) { + memcpy(buffer, m_current_chunk->buffer + wanted_offset, bytes_left); + } + m_offset += bytes_left; + retval = bytes_left; + goto bail_out; + } else { + ssize_t offset = 0; + + /* + * We cannot fulfill the read from the current chunk, see how much + * is available and return that and see if by reading the next chunk + * we can fulfill the whole read. + */ + while (retval < (ssize_t)count) { + /* + * See how much is left in this chunk. + */ + wanted_offset = (m_offset % m_current_chunk->chunk_size); + bytes_left = MIN((ssize_t)count, (m_current_chunk->buflen - wanted_offset)); + + if (bytes_left > 0) { + Dmsg2(200, "Reading %d bytes at offset %d from chunk buffer\n", bytes_left, wanted_offset); + + memcpy(buffer, m_current_chunk->buffer + wanted_offset, bytes_left); + m_offset += bytes_left; + offset += bytes_left; + retval += bytes_left; + } + + /* + * Read in the next chunk. + */ + m_current_chunk->start_offset += m_current_chunk->chunk_size; + if (!read_chunk()) { + switch (dev_errno) { + case EIO: + /* + * If the are no more chunks to read we return only the bytes available. + * We also set m_end_of_media as we are at the end of media. 
+ */ + m_end_of_media = true; + goto bail_out; + default: + retval = -1; + goto bail_out; + } + } else { + bytes_left = MIN((boffset_t)(count - retval), m_current_chunk->buflen); + + if (bytes_left > 0) { + Dmsg2(200, "Reading %d bytes at offset %d from chunk buffer\n", bytes_left, 0); + + memcpy((char *)buffer + offset, m_current_chunk->buffer, bytes_left); + m_offset += bytes_left; + retval += bytes_left; + } + } + } + } + } else { + errno = EBADF; + retval = -1; + } + +bail_out: + return retval; +} + +/* + * Write a chunked volume. + */ +ssize_t chunked_device::write_chunked(int fd, const void *buffer, size_t count) +{ + ssize_t retval = 0; + + if (m_current_chunk->opened) { + ssize_t wanted_offset; + + /* + * If we are starting writing without the chunk being setup it means we + * are start writing to an empty file because otherwise the d_lseek method + * would have read in the correct chunk. + */ + if (!m_current_chunk->chunk_setup) { + m_current_chunk->start_offset = 0; + m_current_chunk->end_offset = (m_current_chunk->chunk_size - 1); + m_current_chunk->buflen = 0; + m_current_chunk->chunk_setup = true; + + /* + * See if we have to allocate a new buffer. + */ + if (!m_current_chunk->buffer) { + m_current_chunk->buffer = allocate_chunkbuffer(); + } + } + + /* + * See if we can write the whole data inside the current chunk. + */ + if (m_current_chunk->start_offset <= m_offset && + m_current_chunk->end_offset >= (boffset_t)((m_offset + count) - 1)) { + + wanted_offset = (m_offset % m_current_chunk->chunk_size); + + Dmsg2(200, "Writing %d bytes at offset %d in chunk buffer\n", count, wanted_offset); + + memcpy(m_current_chunk->buffer + wanted_offset, buffer, count); + + m_offset += count; + if ((wanted_offset + count) > m_current_chunk->buflen) { + m_current_chunk->buflen = wanted_offset + count; + } + m_current_chunk->need_flushing = true; + retval = count; + } else { + ssize_t bytes_left; + ssize_t offset = 0; + + /* + * Things don't fit so first write as many bytes as can be written into + * the current chunk and then flush it and write the next bytes into the + * next chunk. + */ + while (retval < (ssize_t)count) { + /* + * See how much is left in this chunk. + */ + wanted_offset = (m_offset % m_current_chunk->chunk_size); + bytes_left = ((m_current_chunk->end_offset - (m_current_chunk->start_offset + wanted_offset)) + 1); + + if (bytes_left > 0) { + Dmsg2(200, "Writing %d bytes at offset %d in chunk buffer\n", bytes_left, wanted_offset); + + memcpy(m_current_chunk->buffer + wanted_offset, buffer, bytes_left); + m_offset += bytes_left; + if ((wanted_offset + bytes_left) > m_current_chunk->buflen) { + m_current_chunk->buflen = wanted_offset + bytes_left; + } + m_current_chunk->need_flushing = true; + retval += bytes_left; + + /* + * Keep track of the number of bytes we already consumed. + */ + offset += bytes_left; + } + + /* + * Flush out the current chunk. 
+ */ + if (!flush_chunk(true /* release */, true /* move_to_next_chunk */)) { + retval = -1; + goto bail_out; + } + + bytes_left = MIN((boffset_t)(count - retval), ((m_current_chunk->end_offset - m_current_chunk->start_offset) + 1)); + if (bytes_left > 0) { + Dmsg2(200, "Writing %d bytes at offset %d in chunk buffer\n", bytes_left, 0); + + memcpy(m_current_chunk->buffer, (char *)buffer + offset, bytes_left); + m_current_chunk->buflen = bytes_left; + m_current_chunk->need_flushing = true; + m_offset += bytes_left; + retval += bytes_left; + } + } + } + } else { + errno = EBADF; + retval = -1; + } + +bail_out: + return retval; +} + +/* + * Close a chunked volume. + */ +int chunked_device::close_chunk() +{ + int retval = -1; + + if (m_current_chunk->opened) { + if (m_current_chunk->need_flushing) { + if (flush_chunk(true /* release */, false /* move_to_next_chunk */)) { + retval = 0; + } else { + dev_errno = EIO; + } + } + + /* + * Invalidate chunk. + */ + m_current_chunk->writing = false; + m_current_chunk->opened = false; + m_current_chunk->chunk_setup = false; + m_current_chunk->buflen = 0; + m_current_chunk->start_offset = -1; + m_current_chunk->end_offset = -1; + } else { + errno = EBADF; + } + + return retval; +} + +/* + * Truncate a chunked volume. + */ +bool chunked_device::truncate_chunked_volume(DCR *dcr) +{ + if (m_current_chunk->opened) { + if (!truncate_remote_chunked_volume(dcr)) { + return false; + } + + /* + * Reinitialize the initial chunk. + */ + m_current_chunk->start_offset = 0; + m_current_chunk->end_offset = (m_current_chunk->chunk_size - 1); + m_current_chunk->buflen = 0; + m_current_chunk->chunk_setup = true; + m_current_chunk->need_flushing = false; + + /* + * Reinitialize the volume name on a relabel we could get a new name. + */ + if (m_current_volname) { + free(m_current_volname); + } + + m_current_volname = bstrdup(getVolCatName()); + } + + return true; +} + +static int compare_volume_name(void *item1, void *item2) +{ + const char *volname = (const char *)item2; + chunk_io_request *request = (chunk_io_request *)item1; + + return strcmp(request->volname, volname); +} + +/* + * Get the current size of a volume. + */ +ssize_t chunked_device::chunked_volume_size() +{ + /* + * See if we are using io-threads or not and the ordered circbuf is created and not empty. + */ + if (m_io_threads > 0 && m_cb && !m_cb->empty()) { + char *volname; + chunk_io_request *request; + + volname = getVolCatName(); + + /* + * Peek on the ordered circular queue if there are any pending IO-requests + * for this volume. If there are use that as the indication of the size of + * the volume and don't contact the remote storage as there is still data + * inflight and as such we need to look at the last chunk that is still not + * uploaded of the volume. + */ + request = (chunk_io_request *)m_cb->peek(PEEK_LAST, volname, compare_volume_name); + if (request) { + ssize_t retval; + + /* + * Calculate the size of the volume based on the last chunk inflight . + */ + retval = (request->chunk * m_current_chunk->chunk_size) + request->wbuflen; + + /* + * The peek method gives us a cloned chunk_io_request with pointers to + * the original chunk_io_request. We just need to free the structure not + * the content so we call free() here and not free_chunk_io_request() ! + */ + free(request); + + return retval; + } + } + + /* + * Get the actual length by contacting the remote backing store. + */ + return chunked_remote_volume_size(); +} + +/* + * Make sure we have the right chunk in memory. 
+ */ +bool chunked_device::load_chunk() +{ + boffset_t start_offset; + + start_offset = (m_offset / m_current_chunk->chunk_size) * m_current_chunk->chunk_size; + + /* + * See if we have to allocate a new buffer. + */ + if (!m_current_chunk->buffer) { + m_current_chunk->buffer = allocate_chunkbuffer(); + } + + /* + * If the wrong chunk is loaded populate the chunk buffer with the right data. + */ + if (start_offset != m_current_chunk->start_offset) { + m_current_chunk->buflen = 0; + m_current_chunk->start_offset = start_offset; + if (!read_chunk()) { + switch (dev_errno) { + case EIO: + if (m_current_chunk->writing) { + m_current_chunk->end_offset = start_offset + (m_current_chunk->chunk_size - 1); + } + break; + default: + return false; + } + } + } + m_current_chunk->chunk_setup = true; + + return true; +} + +static int list_io_request(void *request, void *data) +{ + chunk_io_request *io_request = (chunk_io_request *)request; + bsdDevStatTrig *dst = (bsdDevStatTrig *)data; + POOL_MEM status(PM_MESSAGE); + + status.bsprintf(" /%s/%04d - %ld\n", io_request->volname, io_request->chunk, io_request->wbuflen); + dst->status_length = pm_strcat(dst->status, status.c_str()); + + return 0; +} + +/* + * Return specific device status information. + */ +bool chunked_device::device_status(bsdDevStatTrig *dst) +{ + /* + * See if we are using io-threads or not and the ordered circbuf is created and not empty. + */ + dst->status_length = 0; + if (m_io_threads > 0 && m_cb) { + if (!m_cb->empty()) { + dst->status_length = pm_strcpy(dst->status, _("Pending IO flush requests:\n")); + + /* + * Peek on the ordered circular queue and list all pending requests. + */ + m_cb->peek(PEEK_LIST, dst, list_io_request); + } else { + dst->status_length = pm_strcpy(dst->status, _("No Pending IO flush requests\n")); + } + } + + return (dst->status_length > 0); +} + +chunked_device::~chunked_device() +{ + if (m_thread_ids) { + stop_threads(); + } + + if (m_cb) { + /* + * If there is any work on the ordered circular buffer remove it. + */ + if (!m_cb->empty()) { + chunk_io_request *request; + do { + request = (chunk_io_request *)m_cb->dequeue(); + if (request) { + request->release = true; + free_chunk_io_request(request); + } + } while (!m_cb->empty()); + } + + delete m_cb; + m_cb = NULL; + } + + if (m_current_chunk) { + if (m_current_chunk->buffer) { + free_chunkbuffer(m_current_chunk->buffer); + } + free(m_current_chunk); + m_current_chunk = NULL; + } + + if (m_current_volname) { + free(m_current_volname); + } +} + +chunked_device::chunked_device() +{ + m_current_volname = NULL; + m_current_chunk = NULL; + m_io_threads = 0; + m_io_slots = 0; + m_chunk_size = 0; + m_io_threads_started = false; + m_end_of_media = false; + m_cb = NULL; + m_io_threads = 0; + m_chunk_size = 0; + m_offset = 0; + m_use_mmap = false; +} +#endif /* HAVE_OBJECTSTORE */ diff --git a/src/stored/backends/chunked_device.h b/src/stored/backends/chunked_device.h new file mode 100644 index 00000000000..53a24bee5ed --- /dev/null +++ b/src/stored/backends/chunked_device.h @@ -0,0 +1,156 @@ +/* + BAREOS® - Backup Archiving REcovery Open Sourced + + Copyright (C) 2015-2017 Planets Communications B.V. + + This program is Free Software; you can redistribute it and/or + modify it under the terms of version three of the GNU Affero General Public + License as published by the Free Software Foundation, which is + listed in the file LICENSE. 
+ + This program is distributed in the hope that it will be useful, but + WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program; if not, write to the Free Software + Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA + 02110-1301, USA. +*/ +/* + * Chunked device device abstraction. + * + * Marco van Wieringen, February 2015 + */ + +#ifndef CHUNKED_DEVICE_H +#define CHUNKED_DEVICE_H + +/* + * Let io-threads check for work every 300 seconds. + */ +#define DEFAULT_RECHECK_INTERVAL 300 + +/* + * Chunk the volume into chunks of this size. + * This is the lower limit used the exact chunksize is + * configured as a device option. + */ +#define DEFAULT_CHUNK_SIZE 10 * 1024 * 1024 + +/* + * Maximum number of chunks per volume. + * When you change this make sure you update the %04d format + * used in the code to format the chunk numbers e.g. 0000-9999 + */ +#define MAX_CHUNKS 10000 + +enum thread_wait_type { + WAIT_CANCEL_THREAD, /* Perform a pthread_cancel() on exit. */ + WAIT_JOIN_THREAD /* Perform a pthread_join() on exit. */ +}; + +struct thread_handle { + thread_wait_type type; /* See WAIT_*_THREAD thread_wait_type enum */ + pthread_t thread_id; /* Actual threadid */ +}; + +struct chunk_io_request { + const char *volname; /* VolumeName */ + uint16_t chunk; /* Chunk number */ + char *buffer; /* Data */ + uint32_t wbuflen; /* Size of the actual valid data in the chunk (Write) */ + uint32_t *rbuflen; /* Size of the actual valid data in the chunk (Read) */ + bool release; /* Should we release the data to which the buffer points ? */ +}; + +struct chunk_descriptor { + ssize_t chunk_size; /* Total size of the memory chunk */ + char *buffer; /* Data */ + uint32_t buflen; /* Size of the actual valid data in the chunk */ + boffset_t start_offset; /* Start offset of the current chunk */ + boffset_t end_offset; /* End offset of the current chunk */ + bool need_flushing; /* Data is dirty and needs flushing to backing store */ + bool chunk_setup; /* Chunk is initialized and ready for use */ + bool writing; /* We are currently writing */ + bool opened; /* An open call was done */ +}; + +#include "lib/ordered_cbuf.h" + +class chunked_device: public DEVICE { +private: + /* + * Private Members + */ + bool m_io_threads_started; + bool m_end_of_media; + char *m_current_volname; + ordered_circbuf *m_cb; + alist *m_thread_ids; + chunk_descriptor *m_current_chunk; + + /* + * Private Methods + */ + char *allocate_chunkbuffer(); + void free_chunkbuffer(char *buffer); + void free_chunk_io_request(chunk_io_request *request); + bool start_io_threads(); + void stop_threads(); + bool enqueue_chunk(chunk_io_request *request); + bool flush_chunk(bool release_chunk, bool move_to_next_chunk); + bool read_chunk(); + +protected: + /* + * Protected Members + */ + uint8_t m_io_threads; + uint8_t m_io_slots; + uint64_t m_chunk_size; + boffset_t m_offset; + bool m_use_mmap; + + /* + * Protected Methods + */ + void setup_chunk(int flags); + ssize_t read_chunked(int fd, void *buffer, size_t count); + ssize_t write_chunked(int fd, const void *buffer, size_t count); + int close_chunk(); + bool truncate_chunked_volume(DCR *dcr); + ssize_t chunked_volume_size(); + bool load_chunk(); + + /* + * Methods implemented by inheriting class. 
+ */ + virtual bool flush_remote_chunk(chunk_io_request *request) = 0; + virtual bool read_remote_chunk(chunk_io_request *request) = 0; + virtual ssize_t chunked_remote_volume_size() = 0; + virtual bool truncate_remote_chunked_volume(DCR *dcr) = 0; + +public: + /* + * Public Methods + */ + chunked_device(); + virtual ~chunked_device(); + + bool dequeue_chunk(); + bool device_status(bsdDevStatTrig *dst); + + /* + * Interface from DEVICE + */ + virtual int d_close(int fd) = 0; + virtual int d_open(const char *pathname, int flags, int mode) = 0; + virtual int d_ioctl(int fd, ioctl_req_t request, char *mt = NULL) = 0; + virtual boffset_t d_lseek(DCR *dcr, boffset_t offset, int whence) = 0; + virtual ssize_t d_read(int fd, void *buffer, size_t count) = 0; + virtual ssize_t d_write(int fd, const void *buffer, size_t count) = 0; + virtual bool d_truncate(DCR *dcr) = 0; +}; +#endif /* CHUNKED_DEVICE_H */ diff --git a/src/stored/backends/object_store_device.c b/src/stored/backends/object_store_device.c index adaf248f355..6ca49a82441 100644 --- a/src/stored/backends/object_store_device.c +++ b/src/stored/backends/object_store_device.c @@ -1,7 +1,7 @@ /* BAREOS® - Backup Archiving REcovery Open Sourced - Copyright (C) 2014-2014 Planets Communications B.V. + Copyright (C) 2014-2017 Planets Communications B.V. Copyright (C) 2014-2014 Bareos GmbH & Co. KG This program is Free Software; you can redistribute it and/or @@ -22,6 +22,16 @@ /* * Object Storage API device abstraction. * + * Stacking is the following: + * + * object_store_device:: + * | + * v + * chunked_device:: + * | + * v + * DEVICE:: + * * Marco van Wieringen, February 2014 */ @@ -29,6 +39,7 @@ #ifdef HAVE_OBJECTSTORE #include "stored.h" +#include "chunked_device.h" #include "object_store_device.h" /* @@ -37,7 +48,14 @@ enum device_option_type { argument_none = 0, argument_profile, - argument_bucket + argument_location, + argument_canned_acl, + argument_storage_class, + argument_bucket, + argument_chunksize, + argument_iothreads, + argument_ioslots, + argument_mmap }; struct device_option { @@ -48,7 +66,14 @@ struct device_option { static device_option device_options[] = { { "profile=", argument_profile, 8 }, + { "location=", argument_location, 9 }, + { "acl=", argument_canned_acl, 4 }, + { "storageclass=", argument_storage_class, 13 }, { "bucket=", argument_bucket, 7 }, + { "chunksize=", argument_chunksize, 10 }, + { "iothreads=", argument_iothreads, 10 }, + { "ioslots=", argument_ioslots, 8 }, + { "mmap", argument_mmap, 4 }, { NULL, argument_none } }; @@ -73,6 +98,8 @@ static void object_store_logfunc(dpl_ctx_t *ctx, dpl_log_level_t level, const ch case DPL_ERROR: Emsg1(M_ERROR, 0, "%s\n", message); break; + default: + break; } } @@ -85,12 +112,26 @@ static inline int droplet_errno_to_system_errno(dpl_status_t status) case DPL_ENOENT: errno = ENOENT; break; + case DPL_ETIMEOUT: + errno = ETIMEDOUT; + case DPL_ENOMEM: + errno = ENOMEM; + break; case DPL_EIO: errno = EIO; break; case DPL_ENAMETOOLONG: errno = ENAMETOOLONG; break; + case DPL_ENOTDIR: + errno = ENOTDIR; + break; + case DPL_ENOTEMPTY: + errno = ENOTEMPTY; + break; + case DPL_EISDIR: + errno = EISDIR; + break; case DPL_EEXIST: errno = EEXIST; break; @@ -98,46 +139,398 @@ static inline int droplet_errno_to_system_errno(dpl_status_t status) errno = EPERM; break; default: + errno = EINVAL; break; } - return -1; + return errno; } /* - * Open a volume using libdroplet. + * Generic callback for the walk_dpl_directory() function. 
+ * + * Returns true - abort loop + * false - continue loop */ -int object_store_device::d_open(const char *pathname, int flags, int mode) +typedef bool (*t_call_back)(dpl_dirent_t *dirent, dpl_ctx_t *ctx, + const char *dirname, void *data); + +/* + * Callback for getting the total size of a chunked volume. + */ +static bool chunked_volume_size_callback(dpl_dirent_t *dirent, dpl_ctx_t *ctx, + const char *dirname, void *data) +{ + ssize_t *volumesize = (ssize_t *)data; + + /* + * Make sure it starts with [0-9] e.g. a volume chunk. + */ + if (*dirent->name >= '0' && *dirent->name <= '9') { + *volumesize = *volumesize + dirent->size; + } + + return false; +} + +/* + * Callback for truncating a chunked volume. + */ +static bool chunked_volume_truncate_callback(dpl_dirent_t *dirent, dpl_ctx_t *ctx, + const char *dirname, void *data) { dpl_status_t status; - dpl_vfile_flag_t dpl_flags; + + /* + * Make sure it starts with [0-9] e.g. a volume chunk. + */ + if (*dirent->name >= '0' && *dirent->name <= '9') { + status = dpl_unlink(ctx, dirent->name); + + switch (status) { + case DPL_SUCCESS: + break; + default: + return true; + } + } + + return false; +} + +/* + * Generic function that walks a dirname and calls the callback + * function for each entry it finds in that directory. + */ +static bool walk_dpl_directory(dpl_ctx_t *ctx, const char *dirname, t_call_back callback, void *data) +{ + void *dir_hdl; + dpl_status_t status; + dpl_dirent_t dirent; + + if (dirname) { + status = dpl_chdir(ctx, dirname); + + switch (status) { + case DPL_SUCCESS: + break; + default: + return false; + } + } + + status = dpl_opendir(ctx, ".", &dir_hdl); + + switch (status) { + case DPL_SUCCESS: + break; + default: + return false; + } + + while (!dpl_eof(dir_hdl)) { + status = dpl_readdir(dir_hdl, &dirent); + + switch (status) { + case DPL_SUCCESS: + break; + default: + dpl_closedir(dir_hdl); + return false; + } + + /* + * Skip '.' and '..' + */ + if (bstrcmp(dirent.name, ".") || + bstrcmp(dirent.name, "..")) { + continue; + } + + if (callback(&dirent, ctx, dirname, data)) { + break; + } + } + + dpl_closedir(dir_hdl); + + if (dirname) { + status = dpl_chdir(ctx, "/"); + + switch (status) { + case DPL_SUCCESS: + break; + default: + return false; + } + } + + return true; +} + +/* + * Internal method for flushing a chunk to the backing store. + * This does the real work either by being called from a + * io-thread or directly blocking the device. + */ +bool object_store_device::flush_remote_chunk(chunk_io_request *request) +{ + bool retval = false; + dpl_status_t status; dpl_option_t dpl_options; + dpl_sysmd_t *sysmd = NULL; + POOL_MEM chunk_dir(PM_FNAME), + chunk_name(PM_FNAME); -#if 1 - Mmsg1(errmsg, _("Object Storage devices are not yet supported, please disable %s\n"), dev_name); - return -1; -#endif + Mmsg(chunk_dir, "/%s", request->volname); + Mmsg(chunk_name, "%s/%04d", chunk_dir.c_str(), request->chunk); + + Dmsg1(100, "Flushing chunk %s\n", chunk_name.c_str()); + + /* + * Check on the remote backing store if the chunk already exists. + * We only upload this chunk if it is bigger then the chunk that exists + * on the remote backing store. When using io-threads it could happen + * that there are multiple flush requests for the same chunk when a + * chunk is reused in a next backup job. We only want the chunk with + * the biggest amount of valid data to persist as we only append to + * chunks. 
+ */ + sysmd = dpl_sysmd_dup(&m_sysmd); + status = dpl_getattr(m_ctx, /* context */ + chunk_name.c_str(), /* locator */ + NULL, /* metadata */ + sysmd); /* sysmd */ + + switch (status) { + case DPL_SUCCESS: + if (sysmd->size > request->wbuflen) { + retval = true; + goto bail_out; + } + break; + default: + /* + * Check on the remote backing store if the chunkdir exists. + */ + dpl_sysmd_free(sysmd); + sysmd = dpl_sysmd_dup(&m_sysmd); + status = dpl_getattr(m_ctx, /* context */ + chunk_dir.c_str(), /* locator */ + NULL, /* metadata */ + sysmd); /* sysmd */ + + switch (status) { + case DPL_SUCCESS: + break; + case DPL_ENOENT: + case DPL_FAILURE: + /* + * Make sure the chunk directory with the name of the volume exists. + */ + dpl_sysmd_free(sysmd); + sysmd = dpl_sysmd_dup(&m_sysmd); + status = dpl_mkdir(m_ctx, /* context */ + chunk_dir.c_str(), /* locator */ + NULL, /* metadata */ + sysmd);/* sysmd */ + + switch (status) { + case DPL_SUCCESS: + break; + default: + Mmsg2(errmsg, _("Failed to create direcory %s using dpl_mkdir(): ERR=%s.\n"), + chunk_dir.c_str(), dpl_status_str(status)); + dev_errno = droplet_errno_to_system_errno(status); + goto bail_out; + } + break; + default: + break; + } + break; + } + + /* + * Create some options for libdroplet. + * + * DPL_OPTION_NOALLOC - we provide the buffer to copy the data into + * no need to let the library allocate memory we + * need to free after copying the data. + */ + memset(&dpl_options, 0, sizeof(dpl_options)); + dpl_options.mask |= DPL_OPTION_NOALLOC; + + dpl_sysmd_free(sysmd); + sysmd = dpl_sysmd_dup(&m_sysmd); + status = dpl_fput(m_ctx, /* context */ + chunk_name.c_str(), /* locator */ + &dpl_options, /* options */ + NULL, /* condition */ + NULL, /* range */ + NULL, /* metadata */ + sysmd, /* sysmd */ + (char *)request->buffer, /* data_buf */ + request->wbuflen); /* data_len */ + + switch (status) { + case DPL_SUCCESS: + break; + default: + Mmsg2(errmsg, _("Failed to flush %s using dpl_fput(): ERR=%s.\n"), + chunk_name.c_str(), dpl_status_str(status)); + dev_errno = droplet_errno_to_system_errno(status); + goto bail_out; + } + + retval = true; + +bail_out: + if (sysmd) { + dpl_sysmd_free(sysmd); + } + + return retval; +} + +/* + * Internal method for reading a chunk from the remote backing store. + */ +bool object_store_device::read_remote_chunk(chunk_io_request *request) +{ + bool retval = false; + dpl_status_t status; + dpl_option_t dpl_options; + dpl_range_t dpl_range; + dpl_sysmd_t *sysmd = NULL; + POOL_MEM chunk_name(PM_FNAME); + + Mmsg(chunk_name, "/%s/%04d", request->volname, request->chunk); + Dmsg1(100, "Reading chunk %s\n", chunk_name.c_str()); + + /* + * See if chunk exists. + */ + sysmd = dpl_sysmd_dup(&m_sysmd); + status = dpl_getattr(m_ctx, /* context */ + chunk_name.c_str(), /* locator */ + NULL, /* metadata */ + sysmd); /* sysmd */ + + switch (status) { + case DPL_SUCCESS: + break; + default: + Mmsg1(errmsg, _("Failed to open %s doesn't exist\n"), chunk_name.c_str()); + Dmsg1(100, "%s", errmsg); + dev_errno = EIO; + goto bail_out; + } + + if (sysmd->size > request->wbuflen) { + Mmsg3(errmsg, _("Failed to read %s (%ld) to big to fit in chunksize of %ld bytes\n"), + chunk_name.c_str(), sysmd->size, request->wbuflen); + Dmsg1(100, "%s", errmsg); + dev_errno = EINVAL; + goto bail_out; + } + + /* + * Create some options for libdroplet. + * + * DPL_OPTION_NOALLOC - we provide the buffer to copy the data into + * no need to let the library allocate memory we + * need to free after copying the data. 
+ */ + memset(&dpl_options, 0, sizeof(dpl_options)); + dpl_options.mask |= DPL_OPTION_NOALLOC; + + dpl_range.start = 0; + dpl_range.end = sysmd->size; + *request->rbuflen = sysmd->size; + dpl_sysmd_free(sysmd); + sysmd = dpl_sysmd_dup(&m_sysmd); + status = dpl_fget(m_ctx, /* context */ + chunk_name.c_str(), /* locator */ + &dpl_options, /* options */ + NULL, /* condition */ + &dpl_range, /* range */ + (char **)&request->buffer, /* data_bufp */ + request->rbuflen, /* data_lenp */ + NULL, /* metadatap */ + sysmd); /* sysmdp */ + + switch (status) { + case DPL_SUCCESS: + break; + case DPL_ENOENT: + Mmsg1(errmsg, _("Failed to open %s doesn't exist\n"), chunk_name.c_str()); + Dmsg1(100, "%s", errmsg); + dev_errno = EIO; + goto bail_out; + default: + Mmsg2(errmsg, _("Failed to read %s using dpl_fget(): ERR=%s.\n"), + chunk_name.c_str(), dpl_status_str(status)); + dev_errno = droplet_errno_to_system_errno(status); + goto bail_out; + } + + retval = true; + +bail_out: + if (sysmd) { + dpl_sysmd_free(sysmd); + } + + return retval; +} + +/* + * Internal method for truncating a chunked volume on the remote backing store. + */ +bool object_store_device::truncate_remote_chunked_volume(DCR *dcr) +{ + POOL_MEM chunk_dir(PM_FNAME); + + Mmsg(chunk_dir, "/%s", getVolCatName()); + if (!walk_dpl_directory(m_ctx, chunk_dir.c_str(), chunked_volume_truncate_callback, NULL)) { + return false; + } + + return true; +} + +/* + * Initialize backend. + */ +bool object_store_device::initialize() +{ + dpl_status_t status; /* * Initialize the droplet library when its not done previously. */ P(mutex); if (droplet_reference_count == 0) { + dpl_set_log_func(object_store_logfunc); + status = dpl_init(); - if (status != DPL_SUCCESS) { + switch (status) { + case DPL_SUCCESS: + break; + default: V(mutex); - return -1; + goto bail_out; } - - dpl_set_log_func(object_store_logfunc); - droplet_reference_count++; } + droplet_reference_count++; V(mutex); if (!m_object_configstring) { int len; - char *bp, *next_option; bool done; + uint64_t value; + char *bp, *next_option; if (!dev_options) { Mmsg0(errmsg, _("No device options configured\n")); @@ -161,14 +554,56 @@ int object_store_device::d_open(const char *pathname, int flags, int mode) */ if (bstrncasecmp(bp, device_options[i].name, device_options[i].compare_size)) { switch (device_options[i].type) { - case argument_profile: - m_profile = bp + device_options[i].compare_size; + case argument_profile: { + char *profile; + + /* + * Strip any .profile prefix from the libdroplet profile name. 
+ */ + profile = bp + device_options[i].compare_size; + len = strlen(profile); + if (len > 8 && bstrcasecmp(profile + (len - 8), ".profile")) { + profile[len - 8] = '\0'; + } + m_profile = profile; + done = true; + break; + } + case argument_location: + m_location = bp + device_options[i].compare_size; + done = true; + break; + case argument_canned_acl: + m_canned_acl = bp + device_options[i].compare_size; + done = true; + break; + case argument_storage_class: + m_storage_class = bp + device_options[i].compare_size; done = true; break; case argument_bucket: m_object_bucketname = bp + device_options[i].compare_size; done = true; break; + case argument_chunksize: + size_to_uint64(bp + device_options[i].compare_size, &value); + m_chunk_size = value; + done = true; + break; + case argument_iothreads: + size_to_uint64(bp + device_options[i].compare_size, &value); + m_io_threads = value & 0xFF; + done = true; + break; + case argument_ioslots: + size_to_uint64(bp + device_options[i].compare_size, &value); + m_io_slots = value & 0xFF; + done = true; + break; + case argument_mmap: + m_use_mmap = true; + done = true; + break; default: break; } @@ -189,14 +624,6 @@ int object_store_device::d_open(const char *pathname, int flags, int mode) Emsg0(M_FATAL, 0, errmsg); goto bail_out; } - - /* - * Strip any .profile prefix from the libdroplet profile name. - */ - len = strlen(m_profile); - if (len > 8 && bstrcasecmp(m_profile + (len - 8), ".profile")) { - m_profile[len - 8] = '\0'; - } } /* @@ -204,18 +631,54 @@ int object_store_device::d_open(const char *pathname, int flags, int mode) */ if (!m_ctx) { char *bp; + POOL_MEM temp(PM_NAME); + + /* + * Setup global sysmd settings which are cloned for each operation. + */ + memset(&m_sysmd, 0, sizeof(m_sysmd)); + if (m_location) { + pm_strcpy(temp, m_location); + m_sysmd.mask |= DPL_SYSMD_MASK_LOCATION_CONSTRAINT; + m_sysmd.location_constraint = dpl_location_constraint(temp.c_str()); + if (m_sysmd.location_constraint == -1) { + Mmsg2(errmsg, _("Illegal location argument %s for device %s%s\n"), temp.c_str(), dev_name); + goto bail_out; + } + } + + if (m_canned_acl) { + pm_strcpy(temp, m_canned_acl); + m_sysmd.mask |= DPL_SYSMD_MASK_CANNED_ACL; + m_sysmd.canned_acl = dpl_canned_acl(temp.c_str()); + if (m_sysmd.canned_acl == -1) { + Mmsg2(errmsg, _("Illegal canned_acl argument %s for device %s%s\n"), temp.c_str(), dev_name); + goto bail_out; + } + } + + if (m_storage_class) { + pm_strcpy(temp, m_storage_class); + m_sysmd.mask |= DPL_SYSMD_MASK_STORAGE_CLASS; + m_sysmd.storage_class = dpl_storage_class(temp.c_str()); + if (m_sysmd.storage_class == -1) { + Mmsg2(errmsg, _("Illegal storage_class argument %s for device %s%s\n"), temp.c_str(), dev_name); + goto bail_out; + } + } /* * See if this is a path. */ - bp = strrchr(m_object_configstring, '/'); + pm_strcpy(temp, m_profile); + bp = strrchr(temp.c_str(), '/'); if (!bp) { /* * Only a profile name. */ - m_ctx = dpl_ctx_new(NULL, m_object_configstring); + m_ctx = dpl_ctx_new(NULL, temp.c_str()); } else { - if (bp == m_object_configstring) { + if (bp == temp.c_str()) { /* * Profile in root of filesystem */ @@ -225,7 +688,7 @@ int object_store_device::d_open(const char *pathname, int flags, int mode) * Profile somewhere else. 
*/ *bp++ = '\0'; - m_ctx = dpl_ctx_new(m_object_configstring, bp); + m_ctx = dpl_ctx_new(temp.c_str(), bp); } } @@ -234,13 +697,15 @@ int object_store_device::d_open(const char *pathname, int flags, int mode) */ if (!m_ctx) { Mmsg1(errmsg, _("Failed to create a new context using config %s\n"), dev_options); - return -1; + Dmsg1(100, "%s", errmsg); + goto bail_out; } /* * Login if that is needed for this backend. */ status = dpl_login(m_ctx); + switch (status) { case DPL_SUCCESS: break; @@ -250,76 +715,42 @@ int object_store_device::d_open(const char *pathname, int flags, int mode) */ break; default: - Mmsg2(errmsg, _("Failed to login for voume %s using dpl_login(): ERR=%s.\n"), + Mmsg2(errmsg, _("Failed to login for volume %s using dpl_login(): ERR=%s.\n"), getVolCatName(), dpl_status_str(status)); - return -1; + Dmsg1(100, "%s", errmsg); + goto bail_out; } /* * If a bucketname was defined set it in the context. */ if (m_object_bucketname) { - m_ctx->cur_bucket = m_object_bucketname; + m_ctx->cur_bucket = bstrdup(m_object_bucketname); } } - /* - * See if we don't have a file open already. - */ - if (m_vfd) { - dpl_close(m_vfd); - m_vfd = NULL; - } + return true; - /* - * Create some options for libdroplet. - * - * DPL_OPTION_NOALLOC - we provide the buffer to copy the data into - * no need to let the library allocate memory we - * need to free after copying the data. - */ - memset(&dpl_options, 0, sizeof(dpl_options)); - dpl_options.mask |= DPL_OPTION_NOALLOC; +bail_out: + return false; +} - if (flags & O_CREAT) { - dpl_flags = DPL_VFILE_FLAG_CREAT | DPL_VFILE_FLAG_RDWR; - status = dpl_open(m_ctx, /* context */ - getVolCatName(), /* locator */ - dpl_flags, /* flags */ - &dpl_options, /* options */ - NULL, /* condition */ - NULL, /* metadata */ - NULL, /* sysmd */ - NULL, /* query_params */ - NULL, /* stream_status */ - &m_vfd); - } else { - dpl_flags = DPL_VFILE_FLAG_RDWR; - status = dpl_open(m_ctx, /* context */ - getVolCatName(), /* locator */ - dpl_flags, /* flags */ - &dpl_options, /* options */ - NULL, /* condition */ - NULL, /* metadata */ - NULL, /* sysmd */ - NULL, /* query_params */ - NULL, /* stream_status */ - &m_vfd); - } +/* + * Open a volume using libdroplet. 
+ */ +int object_store_device::d_open(const char *pathname, int flags, int mode) +{ + int retval = -1; - switch (status) { - case DPL_SUCCESS: - m_offset = 0; - return 0; - default: - Mmsg2(errmsg, _("Failed to open %s using dpl_open(): ERR=%s.\n"), - getVolCatName(), dpl_status_str(status)); - m_vfd = NULL; - return droplet_errno_to_system_errno(status); + if (!initialize()) { + goto bail_out; } + setup_chunk(flags); + retval = 0; + bail_out: - return -1; + return retval; } /* @@ -327,26 +758,7 @@ int object_store_device::d_open(const char *pathname, int flags, int mode) */ ssize_t object_store_device::d_read(int fd, void *buffer, size_t count) { - if (m_vfd) { - unsigned int buflen; - dpl_status_t status; - - buflen = count; - status = dpl_pread(m_vfd, count, m_offset, (char **)&buffer, &buflen); - - switch (status) { - case DPL_SUCCESS: - m_offset += buflen; - return buflen; - default: - Mmsg2(errmsg, _("Failed to read %s using dpl_read(): ERR=%s.\n"), - getVolCatName(), dpl_status_str(status)); - return droplet_errno_to_system_errno(status); - } - } else { - errno = EBADF; - return -1; - } + return read_chunked(fd, buffer, count); } /* @@ -354,43 +766,12 @@ ssize_t object_store_device::d_read(int fd, void *buffer, size_t count) */ ssize_t object_store_device::d_write(int fd, const void *buffer, size_t count) { - if (m_vfd) { - dpl_status_t status; - - status = dpl_pwrite(m_vfd, (char *)buffer, count, m_offset); - switch (status) { - case DPL_SUCCESS: - m_offset += count; - return count; - default: - Mmsg2(errmsg, _("Failed to write %s using dpl_write(): ERR=%s.\n"), - getVolCatName(), dpl_status_str(status)); - return droplet_errno_to_system_errno(status); - } - } else { - errno = EBADF; - return -1; - } + return write_chunked(fd, buffer, count); } int object_store_device::d_close(int fd) { - if (m_vfd) { - dpl_status_t status; - - status = dpl_close(m_vfd); - switch (status) { - case DPL_SUCCESS: - m_vfd = NULL; - return 0; - default: - m_vfd = NULL; - return droplet_errno_to_system_errno(status); - } - } else { - errno = EBADF; - return -1; - } + return close_chunk(); } int object_store_device::d_ioctl(int fd, ioctl_req_t request, char *op) @@ -399,33 +780,66 @@ int object_store_device::d_ioctl(int fd, ioctl_req_t request, char *op) } /* - * Open a directory on the object store and find out size information for a file. + * Open a directory on the object store and find out size information for a volume. */ -static inline size_t object_store_get_file_size(dpl_ctx_t *ctx, const char *filename) +ssize_t object_store_device::chunked_remote_volume_size() { - void *dir_hdl; dpl_status_t status; - dpl_dirent_t dirent; - size_t filesize = -1; + ssize_t volumesize = 0; + dpl_sysmd_t *sysmd = NULL; + POOL_MEM chunk_dir(PM_FNAME); + + Mmsg(chunk_dir, "/%s", getVolCatName()); + + /* + * FIXME: With the current version of libdroplet a dpl_getattr() on a directory + * fails with DPL_ENOENT even when the directory does exist. All other + * operations succeed and as walk_dpl_directory() does a dpl_chdir() anyway + * that will fail if the directory doesn't exist for now we should be + * mostly fine. + */ + +#if 0 + /* + * First make sure that the chunkdir exists otherwise it makes little sense to scan it. 
+ */ + sysmd = dpl_sysmd_dup(&m_sysmd); + status = dpl_getattr(m_ctx, /* context */ + chunk_dir.c_str(), /* locator */ + NULL, /* metadata */ + sysmd); /* sysmd */ - status = dpl_opendir(ctx, ".", &dir_hdl); switch (status) { case DPL_SUCCESS: + /* + * Make sure the filetype is a directory and not a file. + */ + if (sysmd->ftype != DPL_FTYPE_DIR) { + volumesize = -1; + goto bail_out; + } break; + case DPL_ENOENT: + volumesize = -1; + goto bail_out; default: - return -1; + break; } +#endif - while (!dpl_eof(dir_hdl)) { - if (bstrcasecmp(dirent.name, filename)) { - filesize = dirent.size; - break; - } + if (!walk_dpl_directory(m_ctx, chunk_dir.c_str(), chunked_volume_size_callback, &volumesize)) { + volumesize = -1; + goto bail_out; } - dpl_closedir(dir_hdl); +bail_out: + if (sysmd) { + dpl_sysmd_free(sysmd); + } - return filesize; + Dmsg2(100, "Volume size of volume %s, %lld\n", chunk_dir.c_str(), volumesize); + + return volumesize; } boffset_t object_store_device::d_lseek(DCR *dcr, boffset_t offset, int whence) @@ -438,11 +852,14 @@ boffset_t object_store_device::d_lseek(DCR *dcr, boffset_t offset, int whence) m_offset += offset; break; case SEEK_END: { - size_t filesize; + ssize_t volumesize; + + volumesize = chunked_volume_size(); + + Dmsg1(100, "Current volumesize: %lld\n", volumesize); - filesize = object_store_get_file_size(m_ctx, getVolCatName()); - if (filesize >= 0) { - m_offset = filesize + offset; + if (volumesize >= 0) { + m_offset = volumesize + offset; } else { return -1; } @@ -452,78 +869,25 @@ boffset_t object_store_device::d_lseek(DCR *dcr, boffset_t offset, int whence) return -1; } + if (!load_chunk()) { + return -1; + } + return m_offset; } bool object_store_device::d_truncate(DCR *dcr) { - /* - * libdroplet doesn't have a truncate function so unlink the volume and create a new empty one. - */ - if (m_vfd) { - dpl_status_t status; - dpl_vfile_flag_t dpl_flags; - dpl_option_t dpl_options; - - status = dpl_close(m_vfd); - switch (status) { - case DPL_SUCCESS: - m_vfd = NULL; - break; - default: - Mmsg2(errmsg, _("Failed to close %s using dpl_close(): ERR=%s.\n"), - getVolCatName(), dpl_status_str(status)); - return false; - } - - status = dpl_unlink(m_ctx, getVolCatName()); - switch (status) { - case DPL_SUCCESS: - break; - default: - Mmsg2(errmsg, _("Failed to unlink %s using dpl_unlink(): ERR=%s.\n"), - getVolCatName(), dpl_status_str(status)); - return false; - } - - /* - * Create some options for libdroplet. - * - * DPL_OPTION_NOALLOC - we provide the buffer to copy the data into - * no need to let the library allocate memory we - * need to free after copying the data. 
- */ - memset(&dpl_options, 0, sizeof(dpl_options)); - dpl_options.mask |= DPL_OPTION_NOALLOC; - - dpl_flags = DPL_VFILE_FLAG_CREAT | DPL_VFILE_FLAG_RDWR; - status = dpl_open(m_ctx, /* context */ - getVolCatName(), /* locator */ - dpl_flags, /* flags */ - &dpl_options, /* options */ - NULL, /* condition */ - NULL, /* metadata */ - NULL, /* sysmd */ - NULL, /* query_params */ - NULL, /* stream_status */ - &m_vfd); - - switch (status) { - case DPL_SUCCESS: - break; - default: - Mmsg2(errmsg, _("Failed to open %s using dpl_open(): ERR=%s.\n"), - getVolCatName(), dpl_status_str(status)); - return false; - } - } - - return true; + return truncate_chunked_volume(dcr); } object_store_device::~object_store_device() { if (m_ctx) { + if (m_object_bucketname && m_ctx->cur_bucket) { + free(m_ctx->cur_bucket); + m_ctx->cur_bucket = NULL; + } dpl_ctx_free(m_ctx); m_ctx = NULL; } @@ -544,6 +908,9 @@ object_store_device::object_store_device() { m_object_configstring = NULL; m_object_bucketname = NULL; + m_location = NULL; + m_canned_acl = NULL; + m_storage_class = NULL; m_ctx = NULL; } diff --git a/src/stored/backends/object_store_device.h b/src/stored/backends/object_store_device.h index d80912ed736..dfa1077a18d 100644 --- a/src/stored/backends/object_store_device.h +++ b/src/stored/backends/object_store_device.h @@ -1,7 +1,7 @@ /* BAREOS® - Backup Archiving REcovery Open Sourced - Copyright (C) 2014-2014 Planets Communications B.V. + Copyright (C) 2014-2017 Planets Communications B.V. Copyright (C) 2014-2014 Bareos GmbH & Co. KG This program is Free Software; you can redistribute it and/or @@ -31,23 +31,44 @@ #include #include -class object_store_device: public DEVICE { +class object_store_device: public chunked_device { private: + /* + * Private Members + */ char *m_object_configstring; - char *m_profile; - char *m_object_bucketname; + const char *m_profile; + const char *m_location; + const char *m_canned_acl; + const char *m_storage_class; + const char *m_object_bucketname; dpl_ctx_t *m_ctx; - dpl_vfile_t *m_vfd; - boffset_t m_offset; + dpl_sysmd_t m_sysmd; + + /* + * Private Methods + */ + bool initialize(); + + /* + * Interface from chunked_device + */ + bool flush_remote_chunk(chunk_io_request *request); + bool read_remote_chunk(chunk_io_request *request); + ssize_t chunked_remote_volume_size(); + bool truncate_remote_chunked_volume(DCR *dcr); public: + /* + * Public Methods + */ object_store_device(); ~object_store_device(); /* * Interface from DEVICE */ - int d_close(int); + int d_close(int fd); int d_open(const char *pathname, int flags, int mode); int d_ioctl(int fd, ioctl_req_t request, char *mt = NULL); boffset_t d_lseek(DCR *dcr, boffset_t offset, int whence); diff --git a/src/stored/backends/unix_tape_device.c b/src/stored/backends/unix_tape_device.c index eabe3d2b75f..894d859cf05 100644 --- a/src/stored/backends/unix_tape_device.c +++ b/src/stored/backends/unix_tape_device.c @@ -23,6 +23,16 @@ /* * UNIX Tape API device abstraction. 
* + * Stacking is the following: + * + * unix_tape_device:: + * | + * v + * generic_tape_device:: + * | + * v + * DEVICE:: + * * Marco van Wieringen, December 2013 */ diff --git a/src/stored/dev.c b/src/stored/dev.c index 539eb67d233..f87e1aea589 100644 --- a/src/stored/dev.c +++ b/src/stored/dev.c @@ -78,6 +78,7 @@ #include "backends/gfapi_device.h" #endif #ifdef HAVE_OBJECTSTORE +#include "backends/chunked_device.h" #include "backends/object_store_device.h" #endif #ifdef HAVE_RADOS diff --git a/src/win32/stored/backends/win32_tape_device.c b/src/win32/stored/backends/win32_tape_device.c index a88e487774f..20fefc39536 100644 --- a/src/win32/stored/backends/win32_tape_device.c +++ b/src/win32/stored/backends/win32_tape_device.c @@ -29,6 +29,16 @@ * Robert Nelson, May, 2006 * * Extracted from other source files Marco van Wieringen, December 2013 + * + * Stacking is the following: + * + * win32_tape_device:: + * | + * v + * generic_tape_device:: + * | + * v + * DEVICE:: */ #include "bareos.h" From 2c893f0022f7acba54748f714a43d7568f2c9097 Mon Sep 17 00:00:00 2001 From: Marco van Wieringen Date: Sat, 1 Jul 2017 23:22:45 +0200 Subject: [PATCH 13/46] build: Rebuild configure and config.h.in --- autoconf/config.h.in | 5 ++++- configure | 19 ++++++++++++++----- 2 files changed, 18 insertions(+), 6 deletions(-) diff --git a/autoconf/config.h.in b/autoconf/config.h.in index cd83a56804c..d7eeca4afac 100644 --- a/autoconf/config.h.in +++ b/autoconf/config.h.in @@ -583,7 +583,7 @@ /* Define to 1 if you have the `mempcpy' function. */ #undef HAVE_MEMPCPY -/* Define to 1 if you have a working `mmap' system call. */ +/* Define to 1 if you have the `mmap' function. */ #undef HAVE_MMAP /* Define to 1 if you have the header file. */ @@ -889,6 +889,9 @@ /* Define to 1 if you have the header file. */ #undef HAVE_SYS_IOCTL_H +/* Define to 1 if you have the header file. */ +#undef HAVE_SYS_MMAN_H + /* Define to 1 if you have the header file. 
*/ #undef HAVE_SYS_MTIO_H diff --git a/configure b/configure index 43649809287..6827b1dca18 100755 --- a/configure +++ b/configure @@ -23591,6 +23591,14 @@ $as_echo "#define HAVE_SYS_POLL_H 1" >>confdefs.h fi +ac_fn_c_check_header_mongrel "$LINENO" "sys/mman.h" "ac_cv_header_sys_mman_h" "$ac_includes_default" +if test "x$ac_cv_header_sys_mman_h" = xyes; then : + +$as_echo "#define HAVE_SYS_MMAN_H 1" >>confdefs.h + +fi + + for ac_func in glob strcasecmp select poll setenv putenv tcgetattr do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` @@ -23615,7 +23623,7 @@ _ACEOF fi done -for ac_func in nanosleep nl_langinfo +for ac_func in mmap nanosleep nl_langinfo do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" @@ -33808,14 +33816,18 @@ fi +BUILD_SD_BACKENDS="" if test x$use_libtool != xno; then - BUILD_SD_BACKENDS="libbareossd-fifo.la libbareossd-gentape.la libbareossd-tape.la" + BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-fifo.la" + BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-gentape.la" + BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-tape.la" if test X"$have_glusterfs" = "Xyes" ; then BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-gfapi.la" fi if test X"$have_droplet" = "Xyes" ; then + BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-chunked.la" BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-object.la" fi @@ -33830,13 +33842,10 @@ if test x$use_libtool != xno; then if test X"$have_elasto" = "Xyes" ; then BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-elasto.la" fi -else - BUILD_SD_BACKENDS="" fi - if test "x${subsysdir}" = "x${sbindir}" ; then echo " " echo " " From 26b22dcdcf5fc27eaf8ccbfda0ff1c4afeea7888 Mon Sep 17 00:00:00 2001 From: Marco van Wieringen Date: Tue, 8 Aug 2017 11:32:37 +0200 Subject: [PATCH 14/46] stored: Add example configs for objectstorage device. --- .../bareos-dir.d/storage/Object.conf.example | 8 ++++++ .../device/ObjectStorage.conf.example | 26 +++++++++++++++++++ 2 files changed, 34 insertions(+) create mode 100644 src/stored/backends/object_store_device.d/bareos-dir.d/storage/Object.conf.example create mode 100644 src/stored/backends/object_store_device.d/bareos-sd.d/device/ObjectStorage.conf.example diff --git a/src/stored/backends/object_store_device.d/bareos-dir.d/storage/Object.conf.example b/src/stored/backends/object_store_device.d/bareos-dir.d/storage/Object.conf.example new file mode 100644 index 00000000000..394e0dc0069 --- /dev/null +++ b/src/stored/backends/object_store_device.d/bareos-dir.d/storage/Object.conf.example @@ -0,0 +1,8 @@ +Storage { + Name = ObjectS3 + Address = "Replace this by the Bareos Storage Daemon FQDN or IP address" + Password = "Replace this by the Bareos Storage Daemon director password" + Device = ObjectStorage + Media Type = S3_File1 +} + diff --git a/src/stored/backends/object_store_device.d/bareos-sd.d/device/ObjectStorage.conf.example b/src/stored/backends/object_store_device.d/bareos-sd.d/device/ObjectStorage.conf.example new file mode 100644 index 00000000000..dd4d748b098 --- /dev/null +++ b/src/stored/backends/object_store_device.d/bareos-sd.d/device/ObjectStorage.conf.example @@ -0,0 +1,26 @@ +Device { + Name = ObjectStorage + Media Type = S3_File1 + Archive Device = Object S3 Storage + # + # Config options: + # profile= - Droplet profile to use either absolute PATH or logical name (e.g. ~/.droplet/.profile + # location= - AWS location (e.g. us-east etc.) 
+ # acl= - Canned ACL + # storageclass= - Storage Class to use. + # bucket= - Bucket to store objects in. + # chunksize= - Size of Volume Chunks (default = 10 Mb) + # iothreads= - Number of IO-threads to use for uploads (use blocking uploads if not set.) + # ioslots= - Number of IO-slots per IO-thread (default 10) + # mmap - Use mmap to allocate Chunk memory instead of malloc(). + # + Device Options = "profile=/etc/bareos/bareos-sd.d/.objectstorage/objectstorage.profile,bucket=bareos,iothreads=2" + Device Type = object + LabelMedia = yes # lets Bareos label unlabeled media + Random Access = yes + AutomaticMount = yes # when device opened, read it + RemovableMedia = no + AlwaysOpen = no + Description = "Object S3 device. A connecting Director must have the same Name and MediaType." + Maximum File Size = 200000000 # 200 MB (Allows for seeking to small portions of the Volume) +} From f6a23af2599bb361af4fb8621a1d88da5facaecf Mon Sep 17 00:00:00 2001 From: Marco van Wieringen Date: Tue, 8 Aug 2017 12:53:24 +0200 Subject: [PATCH 15/46] build: Add support for object storage backend --- platforms/packaging/bareos.spec | 30 ++++++++++++++++++++++++++++++ src/stored/backends/Makefile.in | 2 ++ 2 files changed, 32 insertions(+) diff --git a/platforms/packaging/bareos.spec b/platforms/packaging/bareos.spec index 2218333060e..7083b06383b 100644 --- a/platforms/packaging/bareos.spec +++ b/platforms/packaging/bareos.spec @@ -44,6 +44,7 @@ Vendor: The Bareos Team %define build_sqlite3 1 %define check_cmocka 1 %define glusterfs 0 +%define objectstorage 0 %define have_git 1 %define ceph 0 %define install_suse_fw 0 @@ -125,6 +126,10 @@ BuildRequires: systemd-rpm-macros %{?systemd_requires} %endif +%if 0%{?objectstorage} +BuildRequires: libdroplet-devel +%endif + %if 0%{?glusterfs} BuildRequires: glusterfs-devel glusterfs-api-devel %endif @@ -306,6 +311,15 @@ Requires(pre): pwdutils Requires(pre): shadow-utils %endif +%if 0%{?objectstorage} +%package storage-object +Summary: Object Storage support for the Bareos Storage daemon +Group: Productivity/Archiving/Backup +Requires: %{name}-common = %{version} +Requires: %{name}-storage = %{version} +Requires: libdroplet-common +%endif + %if 0%{?glusterfs} %package storage-glusterfs Summary: GlusterFS support for the Bareos Storage daemon @@ -567,6 +581,13 @@ This package contains the Storage Daemon This package contains the Storage Daemon tape support (Bareos service to read and write data from/to tape media) +%if 0%{?objectstorage} +%description storage-object +%{dscr} + +This package contains the Storage backend for Object Storage. 
+%endif + %if 0%{?glusterfs} %description storage-glusterfs %{dscr} @@ -1013,6 +1034,15 @@ echo "This is a meta package to install a full bareos system" > %{buildroot}%{_d %{_sysconfdir}/bareos/bareos-dir.d/storage/NULL.conf.example %{_sysconfdir}/bareos/bareos-sd.d/device/NULL.conf.example +%if 0%{?objectstorage} +%files storage-object +%defattr(-, root, root) +%{backend_dir}/libbareossd-chunked*.so +%{backend_dir}/libbareossd-object*.so +%{_sysconfdir}/bareos/bareos-dir.d/storage/Object.conf.example +%{_sysconfdir}/bareos/bareos-sd.d/device/ObjectStorage.conf.example +%endif + %if 0%{?glusterfs} %files storage-glusterfs %defattr(-, root, root) diff --git a/src/stored/backends/Makefile.in b/src/stored/backends/Makefile.in index 62c20d9dd56..d590bacbf30 100644 --- a/src/stored/backends/Makefile.in +++ b/src/stored/backends/Makefile.in @@ -83,6 +83,7 @@ STORED_RESTYPES = autochanger device director ndmp messages storage $(ELASTO_LOBJS): @echo "Compiling $(@:.lo=.c)" $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) $(INCLUDES) $(ELASTO_INC) $(DINCLUDE) $(CXXFLAGS) $(@:.lo=.c) + if [ -d "$(@:.lo=.d)" ]; then $(MKDIR) $(CONF_EXTRA_DIR); $(CP) -r $(@:.lo=.d)/. $(CONF_EXTRA_DIR)/.; fi $(CHEPHFS_LOBJS): @echo "Compiling $(@:.lo=.c)" @@ -97,6 +98,7 @@ $(GFAPI_LOBJS): $(OBJECT_LOBJS): @echo "Compiling $(@:.lo=.c)" $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) $(INCLUDES) $(DROPLET_INC) $(DINCLUDE) $(CXXFLAGS) $(@:.lo=.c) + if [ -d "$(@:.lo=.d)" ]; then $(MKDIR) $(CONF_EXTRA_DIR); $(CP) -r $(@:.lo=.d)/. $(CONF_EXTRA_DIR)/.; fi $(RADOS_LOBJS): @echo "Compiling $(@:.lo=.c)" From a8040e0ad75ffbb31ab0a32455d6abf19dc6bcdb Mon Sep 17 00:00:00 2001 From: Marco van Wieringen Date: Wed, 11 Oct 2017 09:49:44 +0200 Subject: [PATCH 16/46] stored: Fix chunked_device writing. We missed a corner case where the write exactly aligns with the chunksize. Also added a guarding MIN() in the overflow writing part so we never write more then count bytes and crash in memcpy() on to many bytes to copy for the buffer being passed in. The code now has more defensive coding and some things might never be triggered but better make sure things always work. You can now also have a blocksize that is bigger then your chunksize (wrong config but possible to configure) and then still things should work. --- src/stored/backends/chunked_device.c | 73 +++++++++++++++++----------- 1 file changed, 44 insertions(+), 29 deletions(-) diff --git a/src/stored/backends/chunked_device.c b/src/stored/backends/chunked_device.c index 0f256861bc4..2798066f420 100644 --- a/src/stored/backends/chunked_device.c +++ b/src/stored/backends/chunked_device.c @@ -678,22 +678,27 @@ ssize_t chunked_device::read_chunked(int fd, void *buffer, size_t count) /* * We cannot fulfill the read from the current chunk, see how much * is available and return that and see if by reading the next chunk - * we can fulfill the whole read. + * we can fulfill the whole read. When then we still have not filled + * the whole buffer we keep on reading any next chunk until none are + * left and we have reached End Of Media. */ while (retval < (ssize_t)count) { /* * See how much is left in this chunk. 
*/ - wanted_offset = (m_offset % m_current_chunk->chunk_size); - bytes_left = MIN((ssize_t)count, (m_current_chunk->buflen - wanted_offset)); + if (m_offset < m_current_chunk->end_offset) { + wanted_offset = (m_offset % m_current_chunk->chunk_size); + bytes_left = MIN((ssize_t)(count - offset), + (ssize_t)(m_current_chunk->buflen - wanted_offset)); - if (bytes_left > 0) { - Dmsg2(200, "Reading %d bytes at offset %d from chunk buffer\n", bytes_left, wanted_offset); + if (bytes_left > 0) { + Dmsg2(200, "Reading %d bytes at offset %d from chunk buffer\n", bytes_left, wanted_offset); - memcpy(buffer, m_current_chunk->buffer + wanted_offset, bytes_left); - m_offset += bytes_left; - offset += bytes_left; - retval += bytes_left; + memcpy(((char *)buffer + offset), m_current_chunk->buffer + wanted_offset, bytes_left); + m_offset += bytes_left; + offset += bytes_left; + retval += bytes_left; + } } /* @@ -714,13 +719,18 @@ ssize_t chunked_device::read_chunked(int fd, void *buffer, size_t count) goto bail_out; } } else { - bytes_left = MIN((boffset_t)(count - retval), m_current_chunk->buflen); + /* + * Calculate how much data we can read from the just freshly read chunk. + */ + bytes_left = MIN((ssize_t)(count - offset), + (ssize_t)(m_current_chunk->buflen)); if (bytes_left > 0) { Dmsg2(200, "Reading %d bytes at offset %d from chunk buffer\n", bytes_left, 0); - memcpy((char *)buffer + offset, m_current_chunk->buffer, bytes_left); + memcpy(((char *)buffer + offset), m_current_chunk->buffer, bytes_left); m_offset += bytes_left; + offset += bytes_left; retval += bytes_left; } } @@ -789,30 +799,30 @@ ssize_t chunked_device::write_chunked(int fd, const void *buffer, size_t count) /* * Things don't fit so first write as many bytes as can be written into * the current chunk and then flush it and write the next bytes into the - * next chunk. + * next chunk. When then things still don't fit loop until all bytes are + * written. */ while (retval < (ssize_t)count) { /* * See how much is left in this chunk. */ - wanted_offset = (m_offset % m_current_chunk->chunk_size); - bytes_left = ((m_current_chunk->end_offset - (m_current_chunk->start_offset + wanted_offset)) + 1); + if (m_offset < m_current_chunk->end_offset) { + wanted_offset = (m_offset % m_current_chunk->chunk_size); + bytes_left = MIN((ssize_t)(count - offset), + (ssize_t)((m_current_chunk->end_offset - (m_current_chunk->start_offset + wanted_offset)) + 1)); - if (bytes_left > 0) { - Dmsg2(200, "Writing %d bytes at offset %d in chunk buffer\n", bytes_left, wanted_offset); + if (bytes_left > 0) { + Dmsg2(200, "Writing %d bytes at offset %d in chunk buffer\n", bytes_left, wanted_offset); - memcpy(m_current_chunk->buffer + wanted_offset, buffer, bytes_left); - m_offset += bytes_left; - if ((wanted_offset + bytes_left) > m_current_chunk->buflen) { - m_current_chunk->buflen = wanted_offset + bytes_left; + memcpy(m_current_chunk->buffer + wanted_offset, ((char *)buffer + offset), bytes_left); + m_offset += bytes_left; + if ((wanted_offset + bytes_left) > m_current_chunk->buflen) { + m_current_chunk->buflen = wanted_offset + bytes_left; + } + m_current_chunk->need_flushing = true; + offset += bytes_left; + retval += bytes_left; } - m_current_chunk->need_flushing = true; - retval += bytes_left; - - /* - * Keep track of the number of bytes we already consumed. 
- */ - offset += bytes_left; } /* @@ -823,14 +833,19 @@ ssize_t chunked_device::write_chunked(int fd, const void *buffer, size_t count) goto bail_out; } - bytes_left = MIN((boffset_t)(count - retval), ((m_current_chunk->end_offset - m_current_chunk->start_offset) + 1)); + /* + * Calculate how much data we can fit into the just freshly created chunk. + */ + bytes_left = MIN((ssize_t)(count - offset), + (ssize_t)((m_current_chunk->end_offset - m_current_chunk->start_offset) + 1)); if (bytes_left > 0) { Dmsg2(200, "Writing %d bytes at offset %d in chunk buffer\n", bytes_left, 0); - memcpy(m_current_chunk->buffer, (char *)buffer + offset, bytes_left); + memcpy(m_current_chunk->buffer, ((char *)buffer + offset), bytes_left); m_current_chunk->buflen = bytes_left; m_current_chunk->need_flushing = true; m_offset += bytes_left; + offset += bytes_left; retval += bytes_left; } } From 3f3d04e1ad01f31a6aa6b7e153a5586a0b9da843 Mon Sep 17 00:00:00 2001 From: Philipp Storz Date: Tue, 10 Oct 2017 05:02:04 -0500 Subject: [PATCH 17/46] HP-UX: build on current HP-UX 11.31 This patch contains the changes needed to build bareos on HP-UX 11.31 - changes in bareos.h - removed undef _INCLUDE_POSIX1C_SOURCE - added #undef HAVE_LCHMOD - gigaslam.c: - added _LARGEFILE_SOURCE 1 to enable fseeko() - configure: - define -D_INCLUDE_XOPEN_SOURCE_EXTENDED=1 --- autoconf/configure.in | 4 ++-- src/include/bareos.h | 7 ++++--- src/tests/gigaslam.c | 3 +++ 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/autoconf/configure.in b/autoconf/configure.in index e48f550fdb7..c3346ae77e3 100644 --- a/autoconf/configure.in +++ b/autoconf/configure.in @@ -4302,8 +4302,8 @@ hurd) ;; hpux) PSCMD="UNIX95=1; ps -e -o pid,comm" - CFLAGS="${CFLAGS} -D_XOPEN_SOURCE_EXTENDED=1" - CXXFLAGS="${CXXFLAGS} -D_XOPEN_SOURCE_EXTENDED=1" + CFLAGS="${CFLAGS} -D_XOPEN_SOURCE_EXTENDED=1 -D_INCLUDE_XOPEN_SOURCE_EXTENDED=1" + CXXFLAGS="${CXXFLAGS} -D_XOPEN_SOURCE_EXTENDED=1 -D_INCLUDE_XOPEN_SOURCE_EXTENDED=1" TAPEDRIVE="/dev/rmt/0hnb" PTHREAD_LIB="-lpthread" AC_DEFINE(_INCLUDE_LONGLONG, 1, [Needed on HP-UX/g++ systems to support long long ints (int64)]) diff --git a/src/include/bareos.h b/src/include/bareos.h index c48970e96a8..05bd5462b8b 100644 --- a/src/include/bareos.h +++ b/src/include/bareos.h @@ -42,6 +42,10 @@ #include "hostconfig.h" +#ifdef HAVE_HPUX_OS +#undef HAVE_LCHMOD +#endif + #define _REENTRANT 1 #define _THREAD_SAFE 1 #define _POSIX_PTHREAD_SEMANTICS 1 @@ -60,9 +64,6 @@ #include #endif #if HAVE_UNISTD_H -# ifdef HAVE_HPUX_OS -# undef _INCLUDE_POSIX1C_SOURCE -# endif #include #endif #if HAVE_UMEM_H diff --git a/src/tests/gigaslam.c b/src/tests/gigaslam.c index d0db25ef93b..fbdcefbd9ed 100644 --- a/src/tests/gigaslam.c +++ b/src/tests/gigaslam.c @@ -19,6 +19,9 @@ */ +/* Define to make fseeko etc. visible, on some hosts e.g. 
HP-UX */ +#define _LARGEFILE_SOURCE 1 + #define HOW_BIG 1000000000ll #ifdef __GNUC__ From 8eae08d1d76ad7adfaaae675a654ef2a2b80aa3f Mon Sep 17 00:00:00 2001 From: Philipp Storz Date: Thu, 19 Oct 2017 11:15:56 +0200 Subject: [PATCH 18/46] created new configure because of changes for hp-ux --- configure | 18 +++--------------- 1 file changed, 3 insertions(+), 15 deletions(-) diff --git a/configure b/configure index 43649809287..c68735ea32f 100755 --- a/configure +++ b/configure @@ -963,7 +963,6 @@ infodir docdir oldincludedir includedir -runstatedir localstatedir sharedstatedir sysconfdir @@ -1151,7 +1150,6 @@ datadir='${datarootdir}' sysconfdir='${prefix}/etc' sharedstatedir='${prefix}/com' localstatedir='${prefix}/var' -runstatedir='${localstatedir}/run' includedir='${prefix}/include' oldincludedir='/usr/include' docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' @@ -1404,15 +1402,6 @@ do | -silent | --silent | --silen | --sile | --sil) silent=yes ;; - -runstatedir | --runstatedir | --runstatedi | --runstated \ - | --runstate | --runstat | --runsta | --runst | --runs \ - | --run | --ru | --r) - ac_prev=runstatedir ;; - -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \ - | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \ - | --run=* | --ru=* | --r=*) - runstatedir=$ac_optarg ;; - -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) ac_prev=sbindir ;; -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ @@ -1550,7 +1539,7 @@ fi for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ datadir sysconfdir sharedstatedir localstatedir includedir \ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ - libdir localedir mandir runstatedir + libdir localedir mandir do eval ac_val=\$$ac_var # Remove trailing slashes. @@ -1703,7 +1692,6 @@ Fine tuning of the installation directories: --sysconfdir=DIR read-only single-machine data [PREFIX/etc] --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] --localstatedir=DIR modifiable single-machine data [PREFIX/var] - --runstatedir=DIR modifiable per-process data [LOCALSTATEDIR/run] --libdir=DIR object code libraries [EPREFIX/lib] --includedir=DIR C header files [PREFIX/include] --oldincludedir=DIR C header files for non-gcc [/usr/include] @@ -33508,8 +33496,8 @@ hurd) ;; hpux) PSCMD="UNIX95=1; ps -e -o pid,comm" - CFLAGS="${CFLAGS} -D_XOPEN_SOURCE_EXTENDED=1" - CXXFLAGS="${CXXFLAGS} -D_XOPEN_SOURCE_EXTENDED=1" + CFLAGS="${CFLAGS} -D_XOPEN_SOURCE_EXTENDED=1 -D_INCLUDE_XOPEN_SOURCE_EXTENDED=1" + CXXFLAGS="${CXXFLAGS} -D_XOPEN_SOURCE_EXTENDED=1 -D_INCLUDE_XOPEN_SOURCE_EXTENDED=1" TAPEDRIVE="/dev/rmt/0hnb" PTHREAD_LIB="-lpthread" From d7f3c9db3a563003fe56c308e3ba9f0d5f267eaa Mon Sep 17 00:00:00 2001 From: Marco van Wieringen Date: Tue, 24 Oct 2017 14:30:08 +0200 Subject: [PATCH 19/46] gfapi: Fix typo. Print the correct filename in the error message. 
--- src/plugins/filed/gfapi-fd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/plugins/filed/gfapi-fd.c b/src/plugins/filed/gfapi-fd.c index 2d75850f1a7..389e4da655a 100644 --- a/src/plugins/filed/gfapi-fd.c +++ b/src/plugins/filed/gfapi-fd.c @@ -697,7 +697,7 @@ static bRC get_next_file_to_backup(bpContext *ctx) if (status != 0) { berrno be; - Jmsg(ctx, M_ERROR, "gfapi-fd: glfs_stat(%s) failed: %s\n", p_ctx->cwd, be.bstrerror()); + Jmsg(ctx, M_ERROR, "gfapi-fd: glfs_stat(%s) failed: %s\n", p_ctx->next_filename, be.bstrerror()); return bRC_Error; } } else { From c337685948e07a003a431dcb941812abbfa4316d Mon Sep 17 00:00:00 2001 From: Marco van Wieringen Date: Tue, 24 Oct 2017 14:37:43 +0200 Subject: [PATCH 20/46] gfapi: Treat ENOENT as non-fatal error. When we are processing a Gluster Find Filelist we from now on treat a file not found no longer as error condition but just continue with the next file in the list. --- src/plugins/filed/gfapi-fd.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/plugins/filed/gfapi-fd.c b/src/plugins/filed/gfapi-fd.c index 389e4da655a..91b85444859 100644 --- a/src/plugins/filed/gfapi-fd.c +++ b/src/plugins/filed/gfapi-fd.c @@ -697,8 +697,13 @@ static bRC get_next_file_to_backup(bpContext *ctx) if (status != 0) { berrno be; - Jmsg(ctx, M_ERROR, "gfapi-fd: glfs_stat(%s) failed: %s\n", p_ctx->next_filename, be.bstrerror()); - return bRC_Error; + switch (errno) { + case ENOENT: + continue; + default: + Jmsg(ctx, M_ERROR, "gfapi-fd: glfs_stat(%s) failed: %s\n", p_ctx->next_filename, be.bstrerror()); + return bRC_Error; + } } } else { #ifndef HAVE_GLFS_READDIRPLUS From a40d7f2cf274d8a385e94f184b33b1dbab3f6d05 Mon Sep 17 00:00:00 2001 From: Philipp Storz Date: Sat, 28 Oct 2017 21:29:52 +0200 Subject: [PATCH 21/46] Logging of attribute inserting: use right Jmsg call We now use the correct Jmsg() (without Number) for the logging so that we do not get the filename and line like in debug messages. --- src/cats/sql_create.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/cats/sql_create.c b/src/cats/sql_create.c index 01b3800e145..93fc6119fb7 100644 --- a/src/cats/sql_create.c +++ b/src/cats/sql_create.c @@ -831,7 +831,7 @@ bool db_write_batch_file_records(JCR *jcr) Dmsg1(50,"db_create_file_record changes=%u\n",jcr->db_batch->changes); jcr->JobStatus = JS_AttrInserting; - Jmsg1(jcr, M_INFO, 0, "Insert of attributes batch table with %u entries start\n", jcr->db_batch->changes); + Jmsg(jcr, M_INFO, 0, "Insert of attributes batch table with %u entries start\n", jcr->db_batch->changes); if (!sql_batch_end(jcr, jcr->db_batch, NULL)) { Jmsg1(jcr, M_FATAL, 0, "Batch end %s\n", jcr->db_batch->errmsg); @@ -891,7 +891,7 @@ bool db_write_batch_file_records(JCR *jcr) } jcr->JobStatus = JobStatus; /* reset entry status */ - Jmsg0(jcr, M_INFO, 0, "Insert of attributes batch table done\n"); + Jmsg(jcr, M_INFO, 0, "Insert of attributes batch table done\n"); retval = true; bail_out: From 12f27d45226d04eff8ffdf95010c48f5a802feb0 Mon Sep 17 00:00:00 2001 From: Philipp Storz Date: Fri, 3 Nov 2017 03:51:16 -0500 Subject: [PATCH 22/46] HP-UX: set alignment pragma in hash table we now set pragma pack(4) in htable.c for hpux, as we otherwise get this error: Program received signal SIGBUS, Bus error si_code: 1 - BUS_ADRALN - Invalid address alignment. 
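For context, a minimal standalone sketch (illustration only, not part of this patch) of what such a packing
pragma does: it caps member alignment, so a 64-bit member that would otherwise be placed - and assumed by the
generated code to be - on an 8-byte boundary may instead sit on a 4-byte boundary, which the compiler then has
to handle explicitly on strict-alignment platforms such as HP-UX.

#include <stdint.h>
#include <stdio.h>

#pragma pack(4)
struct packed4 {
   uint32_t a;
   uint64_t b;          /* may start at offset 4 under pack(4) */
};
#pragma pack()

struct natural {
   uint32_t a;
   uint64_t b;          /* padded to an 8 byte boundary on most 64-bit ABIs */
};

int main(void)
{
   printf("pack(4): %zu bytes, natural: %zu bytes\n",
          sizeof(struct packed4), sizeof(struct natural));
   return 0;
}
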
--- src/lib/htable.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/lib/htable.c b/src/lib/htable.c index 70c6cf6cba5..61e843d0444 100644 --- a/src/lib/htable.c +++ b/src/lib/htable.c @@ -40,6 +40,12 @@ * Kern Sibbald, July MMIII */ +#include "hostconfig.h" + +#ifdef HAVE_HPUX_OS +#pragma pack(4) +#endif + #include "bareos.h" #define B_PAGE_SIZE 4096 From c0c2c1a8d465851e7fa5cb6ab49951005f2d327e Mon Sep 17 00:00:00 2001 From: Marco van Wieringen Date: Mon, 6 Nov 2017 15:11:25 +0100 Subject: [PATCH 23/46] stored: Fix device not being closed problem. When we cannot read any volume header from a volume for whatever reason the current code will not re-open the device when retrying to mount the next available volume in the DCR::mount_next_write_volume() method. This fixes the code to explictly do a close on the device when we are retrying to mount the next volume and its not a tape device and the device is still open just before we call the autoload_device() function. --- src/stored/mount.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/stored/mount.c b/src/stored/mount.c index 7af3326c233..b617c6fb546 100644 --- a/src/stored/mount.c +++ b/src/stored/mount.c @@ -109,6 +109,7 @@ bool DCR::mount_next_write_volume() if (dev->must_unload()) { ask = true; /* ask operator to mount tape */ } + do_unload(); do_swapping(true /*is_writing*/); do_load(true /*is_writing*/); @@ -135,6 +136,18 @@ bool DCR::mount_next_write_volume() */ dcr->setVolCatInfo(false); /* out of date when Vols unlocked */ + /* + * See if this is a retry of the mounting of the next volume. + * If the device is already open close it first as otherwise we could + * potentially write to an already open device a new volume label. + * This is only interesting for non tape devices. + */ + if (!dev->is_tape()) { + if (retry && dev->is_open()) { + dev->close(dcr); + } + } + switch (autoload_device(dcr, true /* writing */, NULL)) { case -2: case -1: From 87acba4c021f90ab4c67900952eb74f445688a5b Mon Sep 17 00:00:00 2001 From: Marco van Wieringen Date: Thu, 9 Nov 2017 20:11:54 +0100 Subject: [PATCH 24/46] stored: Fix reading from a still inflight chunk. When we use io-threads we could end up in the situation where the data is still queued to be uploaded to the backing store while a new backup starts and will open the volume again and try to read the volume label. This used to fail as we always only looked at the backing store for the source of a volume chunk which was not available yet. We now clone the data from the ordered circular buffer back to the current in memory chunk buffer which gets newly allocated on re-open of the device and we also track any inflight data to the backing store e.g. chunks currently being uploaded, for such chunks we busy wait until they are uploaded or when uploading fails are placed back on the ordered circular list with pending chunk io requests. 
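As a side note, here is a minimal standalone sketch (marker path, file name and values are made up for
illustration) of the inflight-marker idea the change below builds on: a chunk that is being uploaded is
flagged by exclusively creating a marker file, readers can test for that marker, and it is removed again
once the upload finished or failed.

#include <fcntl.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>

static bool mark_inflight(const char *marker)
{
   int fd = open(marker, O_CREAT | O_EXCL | O_WRONLY, 0640); /* fails when already marked */

   if (fd < 0) {
      return false;
   }
   close(fd);

   return true;
}

static bool is_inflight(const char *marker)
{
   struct stat st;

   return stat(marker, &st) == 0;
}

static void clear_inflight(const char *marker)
{
   unlink(marker);
}

int main(void)
{
   const char *marker = "/var/tmp/Vol0001@0001.inflight"; /* made-up marker path */

   if (mark_inflight(marker)) {
      printf("chunk inflight: %s\n", is_inflight(marker) ? "yes" : "no");
      clear_inflight(marker);
   }

   return 0;
}

The actual patch keeps one such marker per chunk in the storage daemon's working directory and combines it
with the PEEK_CLONE lookup on the ordered circular buffer shown in the diff below.
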
--- src/lib/ordered_cbuf.c | 10 ++ src/lib/ordered_cbuf.h | 3 +- src/stored/backends/chunked_device.c | 155 ++++++++++++++++++++++ src/stored/backends/chunked_device.h | 10 ++ src/stored/backends/object_store_device.c | 12 ++ 5 files changed, 189 insertions(+), 1 deletion(-) diff --git a/src/lib/ordered_cbuf.c b/src/lib/ordered_cbuf.c index 9b76300b533..6589ff17e8c 100644 --- a/src/lib/ordered_cbuf.c +++ b/src/lib/ordered_cbuf.c @@ -342,6 +342,16 @@ void *ordered_circbuf::peek(enum oc_peek_types type, item = (struct ocbuf_item *)m_data->next(item); } break; + case PEEK_CLONE: + item = (struct ocbuf_item *)m_data->first(); + while (item) { + if (callback(item->data, data) == 0) { + retval = data; + break; + } + item = (struct ocbuf_item *)m_data->next(item); + } + break; default: goto bail_out; } diff --git a/src/lib/ordered_cbuf.h b/src/lib/ordered_cbuf.h index 3279c14a774..0c2a9f9d3c2 100644 --- a/src/lib/ordered_cbuf.h +++ b/src/lib/ordered_cbuf.h @@ -31,7 +31,8 @@ enum oc_peek_types { PEEK_FIRST = 0, PEEK_LAST, - PEEK_LIST + PEEK_LIST, + PEEK_CLONE }; struct ocbuf_item { diff --git a/src/stored/backends/chunked_device.c b/src/stored/backends/chunked_device.c index 2798066f420..e8ddd61b25b 100644 --- a/src/stored/backends/chunked_device.c +++ b/src/stored/backends/chunked_device.c @@ -52,6 +52,9 @@ * * The public interfaces exported from this device are: * + * set_inflight_chunk() - Set the inflight flag for a chunk. + * clear_inflight_chunk() - Clear the inflight flag for a chunk. + * is_inflight_chunk() - Is a chunk current inflight to the backing store. * setup_chunk() - Setup a chunked volume for reading or writing. * read_chunked() - Read a chunked volume. * write_chunked() - Write a chunked volume. @@ -245,6 +248,64 @@ void chunked_device::stop_threads() } } +/* + * Set the inflight flag for a chunk. + */ +bool chunked_device::set_inflight_chunk(chunk_io_request *request) +{ + int fd; + POOL_MEM inflight_file(PM_FNAME); + + Mmsg(inflight_file, "%s/%s@%04d", me->working_directory, request->volname, request->chunk); + pm_strcat(inflight_file, "%inflight"); + + Dmsg3(100, "Creating inflight file %s for volume %s, chunk %d\n", + inflight_file.c_str(), request->volname, request->chunk); + + fd = ::open(inflight_file.c_str(), O_CREAT | O_EXCL | O_WRONLY, 0640); + if (fd >= 0) { + ::close(fd); + } else { + return false; + } + + return true; +} + +/* + * Clear the inflight flag for a chunk. + */ +void chunked_device::clear_inflight_chunk(chunk_io_request *request) +{ + POOL_MEM inflight_file(PM_FNAME); + + Mmsg(inflight_file, "%s/%s@%04d", me->working_directory, request->volname, request->chunk); + pm_strcat(inflight_file, "%inflight"); + + Dmsg3(100, "Removing inflight file %s for volume %s, chunk %d\n", + inflight_file.c_str(), request->volname, request->chunk); + + ::unlink(inflight_file.c_str()); +} + +/* + * Check if a certain chunk is inflight to the backing store. + */ +bool chunked_device::is_inflight_chunk(chunk_io_request *request) +{ + struct stat st; + POOL_MEM inflight_file(PM_FNAME); + + Mmsg(inflight_file, "%s/%s@%04d", me->working_directory, request->volname, request->chunk); + pm_strcat(inflight_file, "%inflight"); + + if (stat(inflight_file.c_str(), &st) == 0) { + return true; + } + + return false; +} + /* * Call back function for comparing two chunk_io_requests. 
*/ @@ -978,6 +1039,27 @@ ssize_t chunked_device::chunked_volume_size() return chunked_remote_volume_size(); } +static int clone_io_request(void *item1, void *item2) +{ + chunk_io_request *src = (chunk_io_request *)item1; + chunk_io_request *dst = (chunk_io_request *)item2; + + if (bstrcmp(src->volname, dst->volname) && src->chunk == dst->chunk) { + memcpy(dst->buffer, src->buffer, src->wbuflen); + *dst->rbuflen = src->wbuflen; + + /* + * Cloning succeeded. + */ + return 0; + } + + /* + * Not the right volname or chunk. + */ + return -1; +} + /* * Make sure we have the right chunk in memory. */ @@ -1000,6 +1082,77 @@ bool chunked_device::load_chunk() if (start_offset != m_current_chunk->start_offset) { m_current_chunk->buflen = 0; m_current_chunk->start_offset = start_offset; + + /* + * See if we are using io-threads or not and the ordered circbuf is created. + * We try to make sure that nothing of the volume being requested is still inflight as then + * the read_chunk() method will fail to read the data as its not stored on the backing + * store yet. + */ + if (m_io_threads > 0 && m_cb) { + chunk_io_request request; + + request.chunk = m_current_chunk->start_offset / m_current_chunk->chunk_size; + request.volname = m_current_volname; + request.buffer = m_current_chunk->buffer; + request.rbuflen = &m_current_chunk->buflen; + + while (1) { + if (!m_cb->empty()) { + /* + * Peek on the ordered circular queue and clone the data which is infligt back to the + * current chunk buffer. When we are able to clone the data the peek will return the + * address of the request structure it used for the clone operation. When nothing could + * be cloned it will return NULL. If data is cloned we use that and skip the call to + * read the data from the backing store as that will not have the latest data anyway. + */ + if (m_cb->peek(PEEK_CLONE, &request, clone_io_request) == &request) { + goto bail_out; + } + } + + /* + * Chunk doesn't seem to be on the ordered circular buffer. + * Make sure its also not inflight to the backing store. + */ + if (is_inflight_chunk(&request)) { + uint8_t retries = INFLIGHT_RETRIES; + + /* + * Chunk seems to be inflight busy wait until its no longer. + * It either gets uploaded and as such we can just read it from the backing store + * again or it gets put back onto the ordered circular list and then we can pick + * it up by retrying the PEEK_CLONE on the ordered circular list. + */ + do { + bmicrosleep(INFLIGT_RETRY_TIME, 0); + } while (is_inflight_chunk(&request) && --retries > 0); + + /* + * If we ran out of retries we most likely encountered a stale inflight file. + */ + if (!retries) { + clear_inflight_chunk(&request); + break; + } + + /* + * Do a new try to clone the data from the ordered circular list. + */ + continue; + } else { + /* + * Its not on the ordered circular list and not inflight so it must be on the + * backing store so we break the loop and try to read the chunk from the backing store. + */ + break; + } + } + } + + /* + * Read the chunk from the backing store. + */ if (!read_chunk()) { switch (dev_errno) { case EIO: @@ -1012,6 +1165,8 @@ bool chunked_device::load_chunk() } } } + +bail_out: m_current_chunk->chunk_setup = true; return true; diff --git a/src/stored/backends/chunked_device.h b/src/stored/backends/chunked_device.h index 53a24bee5ed..50ba0b1deec 100644 --- a/src/stored/backends/chunked_device.h +++ b/src/stored/backends/chunked_device.h @@ -46,6 +46,13 @@ */ #define MAX_CHUNKS 10000 +/* + * Busy wait retry for inflight chunks. 
+ * Default 12 * 5 = 60 seconds. + */ +#define INFLIGHT_RETRIES 12 +#define INFLIGT_RETRY_TIME 5 + enum thread_wait_type { WAIT_CANCEL_THREAD, /* Perform a pthread_cancel() on exit. */ WAIT_JOIN_THREAD /* Perform a pthread_join() on exit. */ @@ -116,6 +123,9 @@ class chunked_device: public DEVICE { /* * Protected Methods */ + bool set_inflight_chunk(chunk_io_request *request); + void clear_inflight_chunk(chunk_io_request *request); + bool is_inflight_chunk(chunk_io_request *request); void setup_chunk(int flags); ssize_t read_chunked(int fd, void *buffer, size_t count); ssize_t write_chunked(int fd, const void *buffer, size_t count); diff --git a/src/stored/backends/object_store_device.c b/src/stored/backends/object_store_device.c index 6ca49a82441..7edb52f525c 100644 --- a/src/stored/backends/object_store_device.c +++ b/src/stored/backends/object_store_device.c @@ -285,6 +285,13 @@ bool object_store_device::flush_remote_chunk(chunk_io_request *request) Mmsg(chunk_dir, "/%s", request->volname); Mmsg(chunk_name, "%s/%04d", chunk_dir.c_str(), request->chunk); + /* + * Set that we are uploading the chunk. + */ + if (!set_inflight_chunk(request)) { + goto bail_out; + } + Dmsg1(100, "Flushing chunk %s\n", chunk_name.c_str()); /* @@ -386,6 +393,11 @@ bool object_store_device::flush_remote_chunk(chunk_io_request *request) retval = true; bail_out: + /* + * Clear that we are uploading the chunk. + */ + clear_inflight_chunk(request); + if (sysmd) { dpl_sysmd_free(sysmd); } From 67eb24aa387e095d4d805262df24cba7d6c342f4 Mon Sep 17 00:00:00 2001 From: Marco van Wieringen Date: Thu, 9 Nov 2017 20:11:54 +0100 Subject: [PATCH 25/46] stored: Only pass the data pointer to the update function. We do not have to expose the internal structure used on the ordered circular list when calling the update function. We more or less cloned the compare function which is used by the dlist::binary_insert() method which operates on the actual list item and not on the data contained in that list item. --- src/lib/ordered_cbuf.c | 2 +- src/stored/backends/chunked_device.c | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/src/lib/ordered_cbuf.c b/src/lib/ordered_cbuf.c index 6589ff17e8c..3be4fa6ac13 100644 --- a/src/lib/ordered_cbuf.c +++ b/src/lib/ordered_cbuf.c @@ -135,7 +135,7 @@ void *ordered_circbuf::enqueue(void *data, * e.g. replace the old with the new data but don't allocate a new * item on the ordered circular list. */ - update(item, new_item); + update(item->data, new_item->data); /* * Release the unused ocbuf_item. diff --git a/src/stored/backends/chunked_device.c b/src/stored/backends/chunked_device.c index e8ddd61b25b..61f217a76b0 100644 --- a/src/stored/backends/chunked_device.c +++ b/src/stored/backends/chunked_device.c @@ -338,10 +338,8 @@ static int compare_chunk_io_request(void *item1, void *item2) */ static void update_chunk_io_request(void *item1, void *item2) { - ocbuf_item *ocbuf1 = (ocbuf_item *)item1; - ocbuf_item *ocbuf2 = (ocbuf_item *)item2; - chunk_io_request *chunk1 = (chunk_io_request *)ocbuf1->data; - chunk_io_request *chunk2 = (chunk_io_request *)ocbuf2->data; + chunk_io_request *chunk1 = (chunk_io_request *)item1; + chunk_io_request *chunk2 = (chunk_io_request *)item2; /* * See if the new chunk_io_request has more bytes then From f419ad4748b25e5fb06170f3ff3ebdf5a10376b9 Mon Sep 17 00:00:00 2001 From: Marco van Wieringen Date: Thu, 9 Nov 2017 20:11:54 +0100 Subject: [PATCH 26/46] stored: Limit retries to flush to backing store. 
Make it possible to configure an upper limit on the number of tries that the chunked volumes abstraction uses to upload the data to the backing store. When this number of tries is exceeded the chunk is dropped and the device is put into read-only mode not allowing any more writes or (re)opens with the write flag set. --- src/stored/backends/chunked_device.c | 40 ++++++++++++++++++++++- src/stored/backends/chunked_device.h | 5 ++- src/stored/backends/object_store_device.c | 17 +++++----- 3 files changed, 52 insertions(+), 10 deletions(-) diff --git a/src/stored/backends/chunked_device.c b/src/stored/backends/chunked_device.c index 61f217a76b0..a8a4ea33467 100644 --- a/src/stored/backends/chunked_device.c +++ b/src/stored/backends/chunked_device.c @@ -382,6 +382,7 @@ bool chunked_device::enqueue_chunk(chunk_io_request *request) new_request->chunk = request->chunk; new_request->buffer = request->buffer; new_request->wbuflen = request->wbuflen; + new_request->tries = 0; new_request->release = request->release; Dmsg2(100, "Allocated chunk io request of %d bytes at %p\n", sizeof(chunk_io_request), new_request); @@ -459,6 +460,18 @@ bool chunked_device::dequeue_chunk() if (!flush_remote_chunk(new_request)) { chunk_io_request *enqueued_request; + /* + * See if we have a maximum number of retries to upload chunks to the backing store + * and if we have and execeeded those tries for this chunk set the device to read-only + * so any next write to the device will error out. This should prevent us from hanging + * the flushing to the backing store on misconfigured devices. + */ + new_request->tries++; + if (m_retries > 0 && new_request->tries >= m_retries) { + m_readonly = true; + goto bail_out; + } + /* * We failed to flush the chunk to the backing store * so enqueue it again using the reserved slot by dequeue() @@ -502,6 +515,7 @@ bool chunked_device::dequeue_chunk() continue; } +bail_out: /* * Unreserve the slot on the ordered circular buffer reserved by dequeue(). */ @@ -610,8 +624,18 @@ bool chunked_device::read_chunk() /* * Setup a chunked volume for reading or writing. */ -void chunked_device::setup_chunk(int flags) +int chunked_device::setup_chunk(const char *pathname, int flags, int mode) { + /* + * If device is (re)opened and we are put into readonly mode because + * of problems flushing chunks to the backing store we return EROFS + * to the upper layers. + */ + if ((flags & O_RDWR) && m_readonly) { + dev_errno = EROFS; + return -1; + } + if (!m_current_chunk) { m_current_chunk = (chunk_descriptor *)malloc(sizeof(chunk_descriptor)); memset(m_current_chunk, 0, sizeof(chunk_descriptor)); @@ -668,6 +692,8 @@ void chunked_device::setup_chunk(int flags) } m_current_volname = bstrdup(getVolCatName()); + + return 0; } /* @@ -811,6 +837,16 @@ ssize_t chunked_device::write_chunked(int fd, const void *buffer, size_t count) { ssize_t retval = 0; + /* + * If we are put into readonly mode because of problems flushing chunks to the + * backing store we return EIO to the upper layers. 
+ */ + if (m_readonly) { + errno = EIO; + retval = -1; + goto bail_out; + } + if (m_current_chunk->opened) { ssize_t wanted_offset; @@ -1251,9 +1287,11 @@ chunked_device::chunked_device() m_current_chunk = NULL; m_io_threads = 0; m_io_slots = 0; + m_retries = 0; m_chunk_size = 0; m_io_threads_started = false; m_end_of_media = false; + m_readonly = false; m_cb = NULL; m_io_threads = 0; m_chunk_size = 0; diff --git a/src/stored/backends/chunked_device.h b/src/stored/backends/chunked_device.h index 50ba0b1deec..dff3f0b5fe9 100644 --- a/src/stored/backends/chunked_device.h +++ b/src/stored/backends/chunked_device.h @@ -69,6 +69,7 @@ struct chunk_io_request { char *buffer; /* Data */ uint32_t wbuflen; /* Size of the actual valid data in the chunk (Write) */ uint32_t *rbuflen; /* Size of the actual valid data in the chunk (Read) */ + uint8_t tries; /* Number of times the flush was tried to the backing store */ bool release; /* Should we release the data to which the buffer points ? */ }; @@ -93,6 +94,7 @@ class chunked_device: public DEVICE { */ bool m_io_threads_started; bool m_end_of_media; + bool m_readonly; char *m_current_volname; ordered_circbuf *m_cb; alist *m_thread_ids; @@ -116,6 +118,7 @@ class chunked_device: public DEVICE { */ uint8_t m_io_threads; uint8_t m_io_slots; + uint8_t m_retries; uint64_t m_chunk_size; boffset_t m_offset; bool m_use_mmap; @@ -126,7 +129,7 @@ class chunked_device: public DEVICE { bool set_inflight_chunk(chunk_io_request *request); void clear_inflight_chunk(chunk_io_request *request); bool is_inflight_chunk(chunk_io_request *request); - void setup_chunk(int flags); + int setup_chunk(const char *pathname, int flags, int mode); ssize_t read_chunked(int fd, void *buffer, size_t count); ssize_t write_chunked(int fd, const void *buffer, size_t count); int close_chunk(); diff --git a/src/stored/backends/object_store_device.c b/src/stored/backends/object_store_device.c index 7edb52f525c..f9d78d039f5 100644 --- a/src/stored/backends/object_store_device.c +++ b/src/stored/backends/object_store_device.c @@ -55,6 +55,7 @@ enum device_option_type { argument_chunksize, argument_iothreads, argument_ioslots, + argument_retries, argument_mmap }; @@ -73,6 +74,7 @@ static device_option device_options[] = { { "chunksize=", argument_chunksize, 10 }, { "iothreads=", argument_iothreads, 10 }, { "ioslots=", argument_ioslots, 8 }, + { "retries=", argument_retries, 8 }, { "mmap", argument_mmap, 4 }, { NULL, argument_none } }; @@ -612,6 +614,11 @@ bool object_store_device::initialize() m_io_slots = value & 0xFF; done = true; break; + case argument_retries: + size_to_uint64(bp + device_options[i].compare_size, &value); + m_retries = value & 0xFF; + done = true; + break; case argument_mmap: m_use_mmap = true; done = true; @@ -752,17 +759,11 @@ bool object_store_device::initialize() */ int object_store_device::d_open(const char *pathname, int flags, int mode) { - int retval = -1; - if (!initialize()) { - goto bail_out; + return -1; } - setup_chunk(flags); - retval = 0; - -bail_out: - return retval; + return setup_chunk(pathname, flags, mode); } /* From a5e5e4dfb050e89ed0fc14ad5cbb43568e41d9dd Mon Sep 17 00:00:00 2001 From: Marco van Wieringen Date: Mon, 13 Nov 2017 11:00:33 +0100 Subject: [PATCH 27/46] stored: Show error when we set storage device readonly. 
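Illustrative example (not part of this patch; paths and values are placeholders): the
new "retries=" device option goes into the same comma separated Device Options string
as the other chunked device options, e.g.

   Device Options = "profile=/etc/bareos/droplet.profile,bucket=bareos,iothreads=2,retries=3"

With such a setting a chunk that fails to upload three times is dropped, the device is
put into read-only mode, further writes fail with EIO and any (re)open with the write
flag set fails with EROFS.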
--- src/stored/backends/chunked_device.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/stored/backends/chunked_device.c b/src/stored/backends/chunked_device.c index a8a4ea33467..bc97e00aff4 100644 --- a/src/stored/backends/chunked_device.c +++ b/src/stored/backends/chunked_device.c @@ -468,6 +468,9 @@ bool chunked_device::dequeue_chunk() */ new_request->tries++; if (m_retries > 0 && new_request->tries >= m_retries) { + Mmsg4(errmsg, _("Unable to flush chunk %d of volume %s to backing store after %d tries, setting device %s readonly\n"), + new_request->chunk, new_request->volname, new_request->tries, print_name()); + Emsg0(M_ERROR, 0, errmsg); m_readonly = true; goto bail_out; } From 7c0a0310dae4da66d7aa555a743c3e660e99ac5d Mon Sep 17 00:00:00 2001 From: Marco van Wieringen Date: Mon, 13 Nov 2017 14:41:24 +0100 Subject: [PATCH 28/46] stored: Fix getting size of a chunked volume. We could end up in trying to get the current volume size from the backing store while the last chunk of the volume was still inflight to the backing store. Previously we added support for getting the size from the ordered circular list but that is only part of the time it takes for a volume to get flushed to the backing store. When the last volume chunk is still inflight we get the wrong size when we list the volume chunks on the backing store as the chunk might be still growing in size while being uploaded. --- src/stored/backends/chunked_device.c | 134 +++++++++++++++++++++------ src/stored/backends/chunked_device.h | 2 + 2 files changed, 107 insertions(+), 29 deletions(-) diff --git a/src/stored/backends/chunked_device.c b/src/stored/backends/chunked_device.c index bc97e00aff4..d9205ed5afa 100644 --- a/src/stored/backends/chunked_device.c +++ b/src/stored/backends/chunked_device.c @@ -36,6 +36,8 @@ #endif #endif +static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; + /* * This implements a device abstraction that provides so called chunked * volumes. These chunks are kept in memory and flushed to the backing @@ -55,6 +57,7 @@ * set_inflight_chunk() - Set the inflight flag for a chunk. * clear_inflight_chunk() - Clear the inflight flag for a chunk. * is_inflight_chunk() - Is a chunk current inflight to the backing store. + * nr_inflight_chunks() - Number of chunks inflight to the backing store. * setup_chunk() - Setup a chunked volume for reading or writing. * read_chunked() - Read a chunked volume. * write_chunked() - Write a chunked volume. 
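Note (an illustrative sketch, not part of the diff): the inflight state is a marker file
per chunk in the storage daemon working directory, named "<volume>@<chunk>%inflight",
so for chunk 7 of volume Full-0001 something like /var/lib/bareos/Full-0001@0007%inflight
(the working directory path is an assumption). Checking for such a marker outside the
class boils down to a stat() on that name:

   /* Sketch only; needs <stdio.h>, <sys/stat.h>, <limits.h>. */
   static bool chunk_marker_exists(const char *workdir, const char *volname, int chunk)
   {
      struct stat st;
      char name[PATH_MAX];

      snprintf(name, sizeof(name), "%s/%s@%04d%%inflight", workdir, volname, chunk);
      return stat(name, &st) == 0;
   }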
@@ -264,6 +267,9 @@ bool chunked_device::set_inflight_chunk(chunk_io_request *request) fd = ::open(inflight_file.c_str(), O_CREAT | O_EXCL | O_WRONLY, 0640); if (fd >= 0) { + P(mutex); + m_inflight_chunks++; + V(mutex); ::close(fd); } else { return false; @@ -277,15 +283,26 @@ bool chunked_device::set_inflight_chunk(chunk_io_request *request) */ void chunked_device::clear_inflight_chunk(chunk_io_request *request) { + struct stat st; POOL_MEM inflight_file(PM_FNAME); - Mmsg(inflight_file, "%s/%s@%04d", me->working_directory, request->volname, request->chunk); - pm_strcat(inflight_file, "%inflight"); + if (request) { + Mmsg(inflight_file, "%s/%s@%04d", me->working_directory, request->volname, request->chunk); + pm_strcat(inflight_file, "%inflight"); - Dmsg3(100, "Removing inflight file %s for volume %s, chunk %d\n", - inflight_file.c_str(), request->volname, request->chunk); + Dmsg3(100, "Removing inflight file %s for volume %s, chunk %d\n", + inflight_file.c_str(), request->volname, request->chunk); + + if (stat(inflight_file.c_str(), &st) != 0) { + return; + } + + ::unlink(inflight_file.c_str()); + } - ::unlink(inflight_file.c_str()); + P(mutex); + m_inflight_chunks++; + V(mutex); } /* @@ -306,6 +323,20 @@ bool chunked_device::is_inflight_chunk(chunk_io_request *request) return false; } +/* + * Number of inflight chunks to the backing store. + */ +int chunked_device::nr_inflight_chunks() +{ + int retval = 0; + + P(mutex); + retval = m_inflight_chunks; + V(mutex); + + return retval; +} + /* * Call back function for comparing two chunk_io_requests. */ @@ -1035,38 +1066,82 @@ static int compare_volume_name(void *item1, void *item2) ssize_t chunked_device::chunked_volume_size() { /* - * See if we are using io-threads or not and the ordered circbuf is created and not empty. + * See if we are using io-threads or not and the ordered circbuf is created. + * We try to make sure that nothing of the volume being requested is still inflight as then + * the chunked_remote_volume_size() method will fail to determine the size of the data as + * its not fully stored on the backing store yet. */ - if (m_io_threads > 0 && m_cb && !m_cb->empty()) { - char *volname; - chunk_io_request *request; + if (m_io_threads > 0 && m_cb) { + while (1) { + if (!m_cb->empty()) { + chunk_io_request *request; - volname = getVolCatName(); + /* + * Peek on the ordered circular queue if there are any pending IO-requests + * for this volume. If there are use that as the indication of the size of + * the volume and don't contact the remote storage as there is still data + * inflight and as such we need to look at the last chunk that is still not + * uploaded of the volume. + */ + request = (chunk_io_request *)m_cb->peek(PEEK_LAST, m_current_volname, compare_volume_name); + if (request) { + ssize_t retval; - /* - * Peek on the ordered circular queue if there are any pending IO-requests - * for this volume. If there are use that as the indication of the size of - * the volume and don't contact the remote storage as there is still data - * inflight and as such we need to look at the last chunk that is still not - * uploaded of the volume. - */ - request = (chunk_io_request *)m_cb->peek(PEEK_LAST, volname, compare_volume_name); - if (request) { - ssize_t retval; + /* + * Calculate the size of the volume based on the last chunk inflight. + */ + retval = (request->chunk * m_current_chunk->chunk_size) + request->wbuflen; - /* - * Calculate the size of the volume based on the last chunk inflight . 
- */ - retval = (request->chunk * m_current_chunk->chunk_size) + request->wbuflen; + /* + * The peek method gives us a cloned chunk_io_request with pointers to + * the original chunk_io_request. We just need to free the structure not + * the content so we call free() here and not free_chunk_io_request() ! + */ + free(request); + + return retval; + } + } /* - * The peek method gives us a cloned chunk_io_request with pointers to - * the original chunk_io_request. We just need to free the structure not - * the content so we call free() here and not free_chunk_io_request() ! + * Chunk doesn't seem to be on the ordered circular buffer. + * Make sure there is also nothing inflight to the backing store anymore. */ - free(request); + if (nr_inflight_chunks() > 0) { + uint8_t retries = INFLIGHT_RETRIES; + + /* + * There seem to be inflight chunks to the backing store so busy wait until there + * is nothing inflight anymore. The chunks either get uploaded and as such we + * can just get the volume size from the backing store or it gets put back onto + * the ordered circular list and then we can pick it up by retrying the PEEK_LAST + * on the ordered circular list. + */ + do { + bmicrosleep(INFLIGT_RETRY_TIME, 0); + } while (nr_inflight_chunks() > 0 && --retries > 0); - return retval; + /* + * If we ran out of retries we most likely encountered a stale inflight file. + */ + if (!retries) { + clear_inflight_chunk(NULL); + break; + } + + /* + * Do a new try on the ordered circular list to get the last pending IO-request + * for the volume we are trying to get the size of. + */ + continue; + } else { + /* + * Its not on the ordered circular list and not inflight so it must be on the + * backing store so we break the loop and try to get the volume size from the + * chunks available on the backing store. + */ + break; + } } } @@ -1295,6 +1370,7 @@ chunked_device::chunked_device() m_io_threads_started = false; m_end_of_media = false; m_readonly = false; + m_inflight_chunks = 0; m_cb = NULL; m_io_threads = 0; m_chunk_size = 0; diff --git a/src/stored/backends/chunked_device.h b/src/stored/backends/chunked_device.h index dff3f0b5fe9..957106790a3 100644 --- a/src/stored/backends/chunked_device.h +++ b/src/stored/backends/chunked_device.h @@ -95,6 +95,7 @@ class chunked_device: public DEVICE { bool m_io_threads_started; bool m_end_of_media; bool m_readonly; + uint8_t m_inflight_chunks; char *m_current_volname; ordered_circbuf *m_cb; alist *m_thread_ids; @@ -129,6 +130,7 @@ class chunked_device: public DEVICE { bool set_inflight_chunk(chunk_io_request *request); void clear_inflight_chunk(chunk_io_request *request); bool is_inflight_chunk(chunk_io_request *request); + int nr_inflight_chunks(); int setup_chunk(const char *pathname, int flags, int mode); ssize_t read_chunked(int fd, void *buffer, size_t count); ssize_t write_chunked(int fd, const void *buffer, size_t count); From 9689268884a55f99a244160fb1c0d3790c9ab0b8 Mon Sep 17 00:00:00 2001 From: Marco van Wieringen Date: Mon, 13 Nov 2017 22:15:49 +0100 Subject: [PATCH 29/46] dird: Store blocksize using CFG_TYPE_SIZE32 and not CFG_TYPE_PINT32 For uniformity we should parse blocksize everywhere using CFG_TYPE_SIZE32 and not CFG_TYPE_PINT32. The nice side effect of using CFG_TYPE_SIZE32 is that one can use suffixes like Kb/Mb etc. It seems in most places we already use CFG_TYPE_SIZE32 or a special type like CFG_TYPE_MAXBLOCKSIZE which is parsing an CFG_TYPE_SIZE32 and checking for a maximum. 
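For example (values are placeholders, not taken from the original commit), a pool can
then be written with size suffixes instead of plain byte counts:

   Pool {
     Name = Full
     Pool Type = Backup
     Minimum Block Size = 64 Kb     # previously only a plain integer was accepted
     Maximum Block Size = 1 Mb
   }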
--- src/dird/dird_conf.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/dird/dird_conf.c b/src/dird/dird_conf.c index a7a30bc7632..a2478e0b991 100644 --- a/src/dird/dird_conf.c +++ b/src/dird/dird_conf.c @@ -245,7 +245,7 @@ static RES_ITEM cli_items[] = { { "MaximumConcurrentJobs", CFG_TYPE_PINT32, ITEM(res_client.MaxConcurrentJobs), 0, CFG_ITEM_DEFAULT, "1", NULL, NULL }, { "MaximumBandwidthPerJob", CFG_TYPE_SPEED, ITEM(res_client.max_bandwidth), 0, 0, NULL, NULL, NULL }, { "NdmpLogLevel", CFG_TYPE_PINT32, ITEM(res_client.ndmp_loglevel), 0, CFG_ITEM_DEFAULT, "4", NULL, NULL }, - { "NdmpBlockSize", CFG_TYPE_PINT32, ITEM(res_client.ndmp_blocksize), 0, CFG_ITEM_DEFAULT, "64512", NULL, NULL }, + { "NdmpBlockSize", CFG_TYPE_SIZE32, ITEM(res_client.ndmp_blocksize), 0, CFG_ITEM_DEFAULT, "64512", NULL, NULL }, { "NdmpUseLmdb", CFG_TYPE_BOOL, ITEM(res_client.ndmp_use_lmdb), 0, CFG_ITEM_DEFAULT, "true", NULL, NULL }, TLS_CONFIG(res_client) { NULL, 0, { 0 }, 0, 0, NULL, NULL, NULL } @@ -507,8 +507,8 @@ static RES_ITEM pool_items[] = { { "Catalog", CFG_TYPE_RES, ITEM(res_pool.catalog), R_CATALOG, 0, NULL, NULL, NULL }, { "FileRetention", CFG_TYPE_TIME, ITEM(res_pool.FileRetention), 0, 0, NULL, NULL, NULL }, { "JobRetention", CFG_TYPE_TIME, ITEM(res_pool.JobRetention), 0, 0, NULL, NULL, NULL }, - { "MinimumBlockSize", CFG_TYPE_PINT32, ITEM(res_pool.MinBlocksize), 0, 0, NULL, NULL, NULL }, - { "MaximumBlockSize", CFG_TYPE_PINT32, ITEM(res_pool.MaxBlocksize), 0, 0, NULL, "14.2.0-", NULL }, + { "MinimumBlockSize", CFG_TYPE_SIZE32, ITEM(res_pool.MinBlocksize), 0, 0, NULL, NULL, NULL }, + { "MaximumBlockSize", CFG_TYPE_SIZE32, ITEM(res_pool.MaxBlocksize), 0, 0, NULL, "14.2.0-", NULL }, { NULL, 0, { 0 }, 0, 0, NULL, NULL, NULL } }; From 627d88e1eae4080bba558c91c737b80542e4298a Mon Sep 17 00:00:00 2001 From: Philipp Storz Date: Fri, 17 Nov 2017 15:08:17 +0100 Subject: [PATCH 30/46] HP-UX: also alignment pragma in accurate hash table --- src/filed/accurate.h | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/filed/accurate.h b/src/filed/accurate.h index 82203455407..0237740a48d 100644 --- a/src/filed/accurate.h +++ b/src/filed/accurate.h @@ -40,6 +40,13 @@ * disadvantage that we need to keep a filenr to index the bitmap which * also cost some bytes. */ + +#include "hostconfig.h" + +#ifdef HAVE_HPUX_OS +#pragma pack(4) +#endif + struct accurate_payload { int64_t filenr; int32_t delta_seq; From 5b77bcff18b100ccf1636364752311cd49d187df Mon Sep 17 00:00:00 2001 From: Philipp Storz Date: Fri, 17 Nov 2017 16:24:30 -0600 Subject: [PATCH 31/46] HP-UX: undefine posix_fadvise There is a problem with the HP-UX header file defining posix_fadivise, as it is not correctly defined there for use from c++ For now we disable posix_fadvise. --- src/include/bareos.h | 1 + 1 file changed, 1 insertion(+) diff --git a/src/include/bareos.h b/src/include/bareos.h index 05bd5462b8b..b969c805f1c 100644 --- a/src/include/bareos.h +++ b/src/include/bareos.h @@ -44,6 +44,7 @@ #ifdef HAVE_HPUX_OS #undef HAVE_LCHMOD +#undef HAVE_POSIX_FADVISE #endif #define _REENTRANT 1 From 4933d4d166b274d2cb6532123219125eacdf3262 Mon Sep 17 00:00:00 2001 From: Philipp Storz Date: Fri, 17 Nov 2017 18:15:48 -0600 Subject: [PATCH 32/46] HP-UX: cleanup the pragma pack definitions. 
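The idea is to scope the 4-byte packing with push/pop instead of leaving a bare
#pragma pack(4) active for everything included afterwards. As a sketch of the idiom
(struct name and fields abridged, not verbatim from the tree):

   #include "hostconfig.h"

   #ifdef HAVE_HPUX_OS
   #pragma pack(push,4)
   #endif

   struct packed_payload {        /* hypothetical example struct */
      int64_t filenr;
      int32_t delta_seq;
   };

   #ifdef HAVE_HPUX_OS
   #pragma pack(pop)
   #endif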
--- src/filed/accurate.h | 7 ++++++- src/lib/htable.c | 6 ------ src/lib/htable.h | 11 +++++++++++ src/lib/tree.h | 10 ++++++++++ 4 files changed, 27 insertions(+), 7 deletions(-) diff --git a/src/filed/accurate.h b/src/filed/accurate.h index 0237740a48d..35a0c90ea14 100644 --- a/src/filed/accurate.h +++ b/src/filed/accurate.h @@ -44,7 +44,7 @@ #include "hostconfig.h" #ifdef HAVE_HPUX_OS -#pragma pack(4) +#pragma pack(push,4) #endif struct accurate_payload { @@ -54,6 +54,7 @@ struct accurate_payload { char *chksum; }; + /* * Accurate payload storage abstraction classes. */ @@ -96,6 +97,10 @@ struct CurFile { accurate_payload payload; }; +#ifdef HAVE_HPUX_OS +#pragma pack(pop) +#endif + class B_ACCURATE_HTABLE: public B_ACCURATE { protected: htable *m_file_list; diff --git a/src/lib/htable.c b/src/lib/htable.c index 61e843d0444..70c6cf6cba5 100644 --- a/src/lib/htable.c +++ b/src/lib/htable.c @@ -40,12 +40,6 @@ * Kern Sibbald, July MMIII */ -#include "hostconfig.h" - -#ifdef HAVE_HPUX_OS -#pragma pack(4) -#endif - #include "bareos.h" #define B_PAGE_SIZE 4096 diff --git a/src/lib/htable.h b/src/lib/htable.h index 3bb7fa9dff2..d8631662da9 100644 --- a/src/lib/htable.h +++ b/src/lib/htable.h @@ -45,6 +45,13 @@ (*((void **)&(var))=(void *)((tbl)->next()))) #endif + +#include "hostconfig.h" + +#ifdef HAVE_HPUX_OS +#pragma pack(push,4) +#endif + typedef enum { KEY_TYPE_CHAR = 1, KEY_TYPE_UINT32 = 2, @@ -74,6 +81,10 @@ struct h_mem { char first[1]; /* First byte */ }; +#ifdef HAVE_HPUX_OS +#pragma pack(pop) +#endif + class htable : public SMARTALLOC { hlink **table; /* Hash table */ int loffset; /* Link offset in item */ diff --git a/src/lib/tree.h b/src/lib/tree.h index 17c64617753..8c9d3ae71e5 100644 --- a/src/lib/tree.h +++ b/src/lib/tree.h @@ -26,6 +26,12 @@ #include "htable.h" +#include "hostconfig.h" + +#ifdef HAVE_HPUX_OS +#pragma pack(push,4) +#endif + struct s_mem { struct s_mem *next; /* next buffer */ int rem; /* remaining bytes */ @@ -118,6 +124,10 @@ struct s_hl_entry { }; typedef struct s_hl_entry HL_ENTRY; +#ifdef HAVE_HPUX_OS +#pragma pack(pop) +#endif + /* type values */ #define TN_ROOT 1 /* root node */ #define TN_NEWDIR 2 /* created directory to fill path */ From 3b385e65f7b316fc4cd53ff9fac30a7f911af0fd Mon Sep 17 00:00:00 2001 From: Philipp Storz Date: Tue, 14 Nov 2017 05:02:57 -0600 Subject: [PATCH 33/46] make install for plugins: accept empty BUILD_PLUGIN lists --- src/plugins/dird/Makefile.in | 7 ++++--- src/plugins/filed/Makefile.in | 4 +++- src/plugins/stored/Makefile.in | 6 ++++-- 3 files changed, 11 insertions(+), 6 deletions(-) diff --git a/src/plugins/dird/Makefile.in b/src/plugins/dird/Makefile.in index 32b0425c14a..d9e2d41331f 100644 --- a/src/plugins/dird/Makefile.in +++ b/src/plugins/dird/Makefile.in @@ -52,11 +52,12 @@ plugtest: Makefile dir_plugins.c \ install: all $(MKDIR) $(DESTDIR)$(plugindir) $(CP) *.py *.py.template $(DESTDIR)$(plugindir) - @for plugin in $(BUILD_PLUGINS); do \ + if test "x$(BUILD_PLUGINS)" != "x" ; then \ + for plugin in $(BUILD_PLUGINS); do \ $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $$plugin $(DESTDIR)$(plugindir); \ $(RMF) $(DESTDIR)$(plugindir)/$$plugin; \ - done - + done; \ + fi install-test-plugin: all $(MKDIR) $(DESTDIR)$(plugindir) $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) example-plugin-dir.la $(DESTDIR)$(plugindir) diff --git a/src/plugins/filed/Makefile.in b/src/plugins/filed/Makefile.in index 2f181e03ba2..a020b652164 100644 --- a/src/plugins/filed/Makefile.in +++ b/src/plugins/filed/Makefile.in @@ -96,6 +96,7 @@ install: all $(MKDIR) 
${DESTDIR}${confdir}/bareos-dir.d/ $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) bpipe-fd.la $(DESTDIR)$(plugindir) $(RMF) $(DESTDIR)$(plugindir)/bpipe-fd.la + if test "x$(BUILD_PLUGINS)" != "x" ; then \ for plugin in $(BUILD_PLUGINS); do \ $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $$plugin $(DESTDIR)$(plugindir); \ $(RMF) $(DESTDIR)$(plugindir)/$$plugin; \ @@ -113,7 +114,8 @@ install: all $(INSTALL_CONFIG) python-ldap-conf.d/$$i ${DESTDIR}${confdir}/$$i; \ done; \ fi; \ - done + done;\ + fi install-test-plugin: all $(MKDIR) $(DESTDIR)$(plugindir) diff --git a/src/plugins/stored/Makefile.in b/src/plugins/stored/Makefile.in index db74a40ec54..b6b84fe5392 100644 --- a/src/plugins/stored/Makefile.in +++ b/src/plugins/stored/Makefile.in @@ -72,10 +72,12 @@ plugtest: Makefile sd_plugins.c \ install: all $(MKDIR) $(DESTDIR)$(plugindir) $(CP) *.py *.py.template $(DESTDIR)$(plugindir) - @for plugin in $(BUILD_PLUGINS); do \ + if test "x$(BUILD_PLUGINS)" != "x" ; then \ + for plugin in $(BUILD_PLUGINS); do \ $(LIBTOOL_INSTALL) $(INSTALL_PROGRAM) $$plugin $(DESTDIR)$(plugindir); \ $(RMF) $(DESTDIR)$(plugindir)/$$plugin; \ - done + done; \ + fi install-test-plugin: $(MKDIR) $(DESTDIR)$(plugindir) From 74d8d1da37cab68e812350aa1ab65c5691066243 Mon Sep 17 00:00:00 2001 From: Joerg Steffens Date: Fri, 13 Oct 2017 18:55:54 +0200 Subject: [PATCH 34/46] bareos-storage-objectstorage: enable packaging packaging is done for RHEL7 and SLES12. (cherry picked from commit fd8597503a540d524bfce1b4c28da3f18931de65) --- platforms/packaging/bareos.spec | 16 ++++++++++------ 1 file changed, 10 insertions(+), 6 deletions(-) diff --git a/platforms/packaging/bareos.spec b/platforms/packaging/bareos.spec index 7083b06383b..50e0f7b0d2f 100644 --- a/platforms/packaging/bareos.spec +++ b/platforms/packaging/bareos.spec @@ -82,6 +82,7 @@ Vendor: The Bareos Team # SLES 12 %if 0%{?suse_version} == 1315 && 0%{?is_opensuse} == 0 %define ceph 1 +%define objectstorage 1 %endif # @@ -106,11 +107,15 @@ Vendor: The Bareos Team %define python_plugins 0 %endif -%if 0%{?rhel_version} >= 700 || 0%{?centos_version} >= 700 || 0%{?fedora_version} >= 19 -%define systemd_support 1 -%if 0%{?fedora_version} != 19 +%if 0%{?fedora_version} >= 20 %define glusterfs 1 +%define systemd_support 1 %endif + +%if 0%{?rhel_version} >= 700 || 0%{?centos_version} >= 700 +%define glusterfs 1 +%define objectstorage 1 +%define systemd_support 1 %endif %if 0%{?rhel_version} >= 700 @@ -317,7 +322,6 @@ Summary: Object Storage support for the Bareos Storage daemon Group: Productivity/Archiving/Backup Requires: %{name}-common = %{version} Requires: %{name}-storage = %{version} -Requires: libdroplet-common %endif %if 0%{?glusterfs} @@ -1039,8 +1043,8 @@ echo "This is a meta package to install a full bareos system" > %{buildroot}%{_d %defattr(-, root, root) %{backend_dir}/libbareossd-chunked*.so %{backend_dir}/libbareossd-object*.so -%{_sysconfdir}/bareos/bareos-dir.d/storage/Object.conf.example -%{_sysconfdir}/bareos/bareos-sd.d/device/ObjectStorage.conf.example +%attr(0640, %{director_daemon_user},%{daemon_group}) %{_sysconfdir}/bareos/bareos-dir.d/storage/Object.conf.example +%attr(0640, %{storage_daemon_user},%{daemon_group}) %{_sysconfdir}/bareos/bareos-sd.d/device/ObjectStorage.conf.example %endif %if 0%{?glusterfs} From fbebbe7ad05bf2d759724134b6a061da0ff28455 Mon Sep 17 00:00:00 2001 From: Joerg Steffens Date: Thu, 2 Nov 2017 16:43:34 +0100 Subject: [PATCH 35/46] bareos-storage-droplet: renamed bareos-storage-object The name bareos-storage-object has been very generic. 
This backend utilize the the droplet library and even so it has been implemented to store data on S3 Object Storages, in can in principle also be used for other storages support by droplet. For this reason and to indicate the close relation to droplet, the source files and the packages have been renamed. (cherry picked from commit 57a3a389ee79c26d4e98045c099c3717c3d8f36f) --- autoconf/configure.in | 2 +- platforms/packaging/bareos.spec | 16 ++++----- src/stored/Makefile.in | 4 +-- src/stored/backends/Makefile.in | 6 ++-- ...object_store_device.c => droplet_device.c} | 36 +++++++++---------- .../storage/S3_Object.conf.example} | 6 ++-- .../device/S3_ObjectStorage.conf.example} | 13 +++---- ...object_store_device.h => droplet_device.h} | 6 ++-- src/stored/dev.c | 6 ++-- src/stored/dev.h | 6 ++-- src/stored/sd_backends.h | 4 +-- src/stored/stored_conf.c | 4 ++- 12 files changed, 56 insertions(+), 53 deletions(-) rename src/stored/backends/{object_store_device.c => droplet_device.c} (96%) rename src/stored/backends/{object_store_device.d/bareos-dir.d/storage/Object.conf.example => droplet_device.d/bareos-dir.d/storage/S3_Object.conf.example} (69%) rename src/stored/backends/{object_store_device.d/bareos-sd.d/device/ObjectStorage.conf.example => droplet_device.d/bareos-sd.d/device/S3_ObjectStorage.conf.example} (76%) rename src/stored/backends/{object_store_device.h => droplet_device.h} (95%) diff --git a/autoconf/configure.in b/autoconf/configure.in index c4f37c2ebbc..354c39a44d5 100644 --- a/autoconf/configure.in +++ b/autoconf/configure.in @@ -4555,7 +4555,7 @@ if test x$use_libtool != xno; then if test X"$have_droplet" = "Xyes" ; then BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-chunked.la" - BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-object.la" + BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-droplet.la" fi if test X"$have_ceph_rados" = "Xyes" ; then diff --git a/platforms/packaging/bareos.spec b/platforms/packaging/bareos.spec index 50e0f7b0d2f..21856cd3f01 100644 --- a/platforms/packaging/bareos.spec +++ b/platforms/packaging/bareos.spec @@ -317,8 +317,8 @@ Requires(pre): shadow-utils %endif %if 0%{?objectstorage} -%package storage-object -Summary: Object Storage support for the Bareos Storage daemon +%package storage-droplet +Summary: Object Storage support (through libdroplet) for the Bareos Storage daemon Group: Productivity/Archiving/Backup Requires: %{name}-common = %{version} Requires: %{name}-storage = %{version} @@ -586,10 +586,10 @@ This package contains the Storage Daemon tape support (Bareos service to read and write data from/to tape media) %if 0%{?objectstorage} -%description storage-object +%description storage-droplet %{dscr} -This package contains the Storage backend for Object Storage. +This package contains the Storage backend for Object Storage (through libdroplet). 
%endif %if 0%{?glusterfs} @@ -1039,12 +1039,12 @@ echo "This is a meta package to install a full bareos system" > %{buildroot}%{_d %{_sysconfdir}/bareos/bareos-sd.d/device/NULL.conf.example %if 0%{?objectstorage} -%files storage-object +%files storage-droplet %defattr(-, root, root) %{backend_dir}/libbareossd-chunked*.so -%{backend_dir}/libbareossd-object*.so -%attr(0640, %{director_daemon_user},%{daemon_group}) %{_sysconfdir}/bareos/bareos-dir.d/storage/Object.conf.example -%attr(0640, %{storage_daemon_user},%{daemon_group}) %{_sysconfdir}/bareos/bareos-sd.d/device/ObjectStorage.conf.example +%{backend_dir}/libbareossd-droplet*.so +%attr(0640, %{director_daemon_user},%{daemon_group}) %{_sysconfdir}/bareos/bareos-dir.d/storage/S3_Object.conf.example +%attr(0640, %{storage_daemon_user},%{daemon_group}) %{_sysconfdir}/bareos/bareos-sd.d/device/S3_ObjectStorage.conf.example %endif %if 0%{?glusterfs} diff --git a/src/stored/Makefile.in b/src/stored/Makefile.in index 8a9afa7604f..a225524c9d5 100644 --- a/src/stored/Makefile.in +++ b/src/stored/Makefile.in @@ -31,7 +31,7 @@ AVAILABLE_DEVICE_API_SRCS = cephfs_device.c \ chunked_device.c \ elasto_device.c \ gfapi_device.c \ - object_store_device.c \ + droplet_device.c \ rados_device.c \ generic_tape_device.c \ unix_fifo_device.c \ @@ -148,7 +148,7 @@ gfapi_device.lo: gfapi_device.c @echo "Compiling $<" $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(GLUSTER_INC) -c $(WCFLAGS) $(CPPFLAGS) $(INCLUDES) $(DINCLUDE) $(CXXFLAGS) $< -object_storage_device.lo: object_storage_device.c +droplet_device.lo: droplet_device.c @echo "Compiling $<" $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(DROPLET_INC) -c $(WCFLAGS) $(CPPFLAGS) $(INCLUDES) $(DINCLUDE) $(CXXFLAGS) $< diff --git a/src/stored/backends/Makefile.in b/src/stored/backends/Makefile.in index d590bacbf30..2222ad8d9c2 100644 --- a/src/stored/backends/Makefile.in +++ b/src/stored/backends/Makefile.in @@ -41,7 +41,7 @@ ELASTO_LOBJS = $(ELASTO_SRCS:.c=.lo) GFAPI_SRCS = gfapi_device.c GFAPI_LOBJS = $(GFAPI_SRCS:.c=.lo) -OBJECT_SRCS = object_store_device.c +OBJECT_SRCS = droplet_device.c OBJECT_LOBJS = $(OBJECT_SRCS:.c=.lo) RADOS_SRCS = rados_device.c @@ -130,10 +130,10 @@ libbareossd-gfapi.la: Makefile $(GFAPI_LOBJS) $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -L../../lib -o $@ $(GFAPI_LOBJS) -export-dynamic -rpath $(backenddir) -release $(LIBBAREOSSD_LT_RELEASE) \ -soname libbareossd-gfapi-$(LIBBAREOSSD_LT_RELEASE).so $(GLUSTER_LIBS) -lbareos -libbareossd-object.la: Makefile libbareossd-chunked.la $(OBJECT_LOBJS) +libbareossd-droplet.la: Makefile libbareossd-chunked.la $(OBJECT_LOBJS) @echo "Making $@ ..." $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -L../../lib -o $@ $(OBJECT_LOBJS) -export-dynamic -rpath $(backenddir) -release $(LIBBAREOSSD_LT_RELEASE) \ - -soname libbareossd-object-$(LIBBAREOSSD_LT_RELEASE).so $(DROPLET_LIBS) libbareossd-chunked.la -lbareos + -soname libbareossd-droplet-$(LIBBAREOSSD_LT_RELEASE).so $(DROPLET_LIBS) libbareossd-chunked.la -lbareos libbareossd-rados.la: Makefile $(RADOS_LOBJS) @echo "Making $@ ..." 
diff --git a/src/stored/backends/object_store_device.c b/src/stored/backends/droplet_device.c similarity index 96% rename from src/stored/backends/object_store_device.c rename to src/stored/backends/droplet_device.c index f9d78d039f5..50d108ccc00 100644 --- a/src/stored/backends/object_store_device.c +++ b/src/stored/backends/droplet_device.c @@ -24,7 +24,7 @@ * * Stacking is the following: * - * object_store_device:: + * droplet_device:: * | * v * chunked_device:: @@ -40,7 +40,7 @@ #ifdef HAVE_OBJECTSTORE #include "stored.h" #include "chunked_device.h" -#include "object_store_device.h" +#include "droplet_device.h" /* * Options that can be specified for this device type. @@ -275,7 +275,7 @@ static bool walk_dpl_directory(dpl_ctx_t *ctx, const char *dirname, t_call_back * This does the real work either by being called from a * io-thread or directly blocking the device. */ -bool object_store_device::flush_remote_chunk(chunk_io_request *request) +bool droplet_device::flush_remote_chunk(chunk_io_request *request) { bool retval = false; dpl_status_t status; @@ -410,7 +410,7 @@ bool object_store_device::flush_remote_chunk(chunk_io_request *request) /* * Internal method for reading a chunk from the remote backing store. */ -bool object_store_device::read_remote_chunk(chunk_io_request *request) +bool droplet_device::read_remote_chunk(chunk_io_request *request) { bool retval = false; dpl_status_t status; @@ -502,7 +502,7 @@ bool object_store_device::read_remote_chunk(chunk_io_request *request) /* * Internal method for truncating a chunked volume on the remote backing store. */ -bool object_store_device::truncate_remote_chunked_volume(DCR *dcr) +bool droplet_device::truncate_remote_chunked_volume(DCR *dcr) { POOL_MEM chunk_dir(PM_FNAME); @@ -517,7 +517,7 @@ bool object_store_device::truncate_remote_chunked_volume(DCR *dcr) /* * Initialize backend. */ -bool object_store_device::initialize() +bool droplet_device::initialize() { dpl_status_t status; @@ -757,7 +757,7 @@ bool object_store_device::initialize() /* * Open a volume using libdroplet. */ -int object_store_device::d_open(const char *pathname, int flags, int mode) +int droplet_device::d_open(const char *pathname, int flags, int mode) { if (!initialize()) { return -1; @@ -769,7 +769,7 @@ int object_store_device::d_open(const char *pathname, int flags, int mode) /* * Read data from a volume using libdroplet. */ -ssize_t object_store_device::d_read(int fd, void *buffer, size_t count) +ssize_t droplet_device::d_read(int fd, void *buffer, size_t count) { return read_chunked(fd, buffer, count); } @@ -777,17 +777,17 @@ ssize_t object_store_device::d_read(int fd, void *buffer, size_t count) /* * Write data to a volume using libdroplet. */ -ssize_t object_store_device::d_write(int fd, const void *buffer, size_t count) +ssize_t droplet_device::d_write(int fd, const void *buffer, size_t count) { return write_chunked(fd, buffer, count); } -int object_store_device::d_close(int fd) +int droplet_device::d_close(int fd) { return close_chunk(); } -int object_store_device::d_ioctl(int fd, ioctl_req_t request, char *op) +int droplet_device::d_ioctl(int fd, ioctl_req_t request, char *op) { return -1; } @@ -795,7 +795,7 @@ int object_store_device::d_ioctl(int fd, ioctl_req_t request, char *op) /* * Open a directory on the object store and find out size information for a volume. 
*/ -ssize_t object_store_device::chunked_remote_volume_size() +ssize_t droplet_device::chunked_remote_volume_size() { dpl_status_t status; ssize_t volumesize = 0; @@ -855,7 +855,7 @@ ssize_t object_store_device::chunked_remote_volume_size() return volumesize; } -boffset_t object_store_device::d_lseek(DCR *dcr, boffset_t offset, int whence) +boffset_t droplet_device::d_lseek(DCR *dcr, boffset_t offset, int whence) { switch (whence) { case SEEK_SET: @@ -889,12 +889,12 @@ boffset_t object_store_device::d_lseek(DCR *dcr, boffset_t offset, int whence) return m_offset; } -bool object_store_device::d_truncate(DCR *dcr) +bool droplet_device::d_truncate(DCR *dcr) { return truncate_chunked_volume(dcr); } -object_store_device::~object_store_device() +droplet_device::~droplet_device() { if (m_ctx) { if (m_object_bucketname && m_ctx->cur_bucket) { @@ -917,7 +917,7 @@ object_store_device::~object_store_device() V(mutex); } -object_store_device::object_store_device() +droplet_device::droplet_device() { m_object_configstring = NULL; m_object_bucketname = NULL; @@ -933,8 +933,8 @@ extern "C" DEVICE SD_IMP_EXP *backend_instantiate(JCR *jcr, int device_type) DEVICE *dev = NULL; switch (device_type) { - case B_OBJECT_STORE_DEV: - dev = New(object_store_device); + case B_DROPLET_DEV: + dev = New(droplet_device); break; default: Jmsg(jcr, M_FATAL, 0, _("Request for unknown devicetype: %d\n"), device_type); diff --git a/src/stored/backends/object_store_device.d/bareos-dir.d/storage/Object.conf.example b/src/stored/backends/droplet_device.d/bareos-dir.d/storage/S3_Object.conf.example similarity index 69% rename from src/stored/backends/object_store_device.d/bareos-dir.d/storage/Object.conf.example rename to src/stored/backends/droplet_device.d/bareos-dir.d/storage/S3_Object.conf.example index 394e0dc0069..45d42dfebba 100644 --- a/src/stored/backends/object_store_device.d/bareos-dir.d/storage/Object.conf.example +++ b/src/stored/backends/droplet_device.d/bareos-dir.d/storage/S3_Object.conf.example @@ -1,8 +1,8 @@ Storage { - Name = ObjectS3 + Name = S3_Object Address = "Replace this by the Bareos Storage Daemon FQDN or IP address" Password = "Replace this by the Bareos Storage Daemon director password" - Device = ObjectStorage - Media Type = S3_File1 + Device = S3_ObjectStorage + Media Type = S3_Object1 } diff --git a/src/stored/backends/object_store_device.d/bareos-sd.d/device/ObjectStorage.conf.example b/src/stored/backends/droplet_device.d/bareos-sd.d/device/S3_ObjectStorage.conf.example similarity index 76% rename from src/stored/backends/object_store_device.d/bareos-sd.d/device/ObjectStorage.conf.example rename to src/stored/backends/droplet_device.d/bareos-sd.d/device/S3_ObjectStorage.conf.example index dd4d748b098..1b94a0537da 100644 --- a/src/stored/backends/object_store_device.d/bareos-sd.d/device/ObjectStorage.conf.example +++ b/src/stored/backends/droplet_device.d/bareos-sd.d/device/S3_ObjectStorage.conf.example @@ -1,7 +1,7 @@ Device { - Name = ObjectStorage - Media Type = S3_File1 - Archive Device = Object S3 Storage + Name = S3_ObjectStorage + Media Type = S3_Object1 + Archive Device = S3 Object Storage # # Config options: # profile= - Droplet profile to use either absolute PATH or logical name (e.g. ~/.droplet/.profile @@ -14,13 +14,14 @@ Device { # ioslots= - Number of IO-slots per IO-thread (default 10) # mmap - Use mmap to allocate Chunk memory instead of malloc(). 
# - Device Options = "profile=/etc/bareos/bareos-sd.d/.objectstorage/objectstorage.profile,bucket=bareos,iothreads=2" - Device Type = object + Device Options = "profile=/etc/bareos/bareos-sd.d/device/droplet/droplet.profile,bucket=bareos,iothreads=2" + Device Type = droplet LabelMedia = yes # lets Bareos label unlabeled media Random Access = yes AutomaticMount = yes # when device opened, read it RemovableMedia = no AlwaysOpen = no - Description = "Object S3 device. A connecting Director must have the same Name and MediaType." + Description = "S3 Object device. A connecting Director must have the same Name and MediaType." Maximum File Size = 200000000 # 200 MB (Allows for seeking to small portions of the Volume) } + diff --git a/src/stored/backends/object_store_device.h b/src/stored/backends/droplet_device.h similarity index 95% rename from src/stored/backends/object_store_device.h rename to src/stored/backends/droplet_device.h index dfa1077a18d..92c0f955abe 100644 --- a/src/stored/backends/object_store_device.h +++ b/src/stored/backends/droplet_device.h @@ -31,7 +31,7 @@ #include #include -class object_store_device: public chunked_device { +class droplet_device: public chunked_device { private: /* * Private Members @@ -62,8 +62,8 @@ class object_store_device: public chunked_device { /* * Public Methods */ - object_store_device(); - ~object_store_device(); + droplet_device(); + ~droplet_device(); /* * Interface from DEVICE diff --git a/src/stored/dev.c b/src/stored/dev.c index f87e1aea589..141e8d4f80c 100644 --- a/src/stored/dev.c +++ b/src/stored/dev.c @@ -79,7 +79,7 @@ #endif #ifdef HAVE_OBJECTSTORE #include "backends/chunked_device.h" -#include "backends/object_store_device.h" +#include "backends/droplet_device.h" #endif #ifdef HAVE_RADOS #include "backends/rados_device.h" @@ -183,8 +183,8 @@ static inline DEVICE *m_init_dev(JCR *jcr, DEVRES *device, bool new_init) break; #endif #ifdef HAVE_OBJECTSTORE - case B_OBJECT_STORE_DEV: - dev = New(object_store_device); + case B_DROPLET_DEV: + dev = New(droplet_device); break; #endif #ifdef HAVE_RADOS diff --git a/src/stored/dev.h b/src/stored/dev.h index d92ef38e20c..4a87b9f6203 100644 --- a/src/stored/dev.h +++ b/src/stored/dev.h @@ -3,7 +3,7 @@ Copyright (C) 2000-2012 Free Software Foundation Europe e.V. Copyright (C) 2011-2012 Planets Communications B.V. - Copyright (C) 2013-2013 Bareos GmbH & Co. KG + Copyright (C) 2013-2017 Bareos GmbH & Co. KG This program is Free Software; you can redistribute it and/or modify it under the terms of version three of the GNU Affero General Public @@ -91,7 +91,7 @@ enum { B_FIFO_DEV, B_VTL_DEV, B_GFAPI_DEV, - B_OBJECT_STORE_DEV, + B_DROPLET_DEV, B_RADOS_DEV, B_CEPHFS_DEV, B_ELASTO_DEV @@ -373,7 +373,7 @@ class DEVICE: public SMARTALLOC { bool is_tape() const { return (dev_type == B_TAPE_DEV); } bool is_file() const { return (dev_type == B_FILE_DEV || dev_type == B_GFAPI_DEV || - dev_type == B_OBJECT_STORE_DEV || + dev_type == B_DROPLET_DEV || dev_type == B_RADOS_DEV || dev_type == B_CEPHFS_DEV || dev_type == B_ELASTO_DEV); } diff --git a/src/stored/sd_backends.h b/src/stored/sd_backends.h index 6d8a9006dc7..4f0ecb4b9c8 100644 --- a/src/stored/sd_backends.h +++ b/src/stored/sd_backends.h @@ -2,7 +2,7 @@ BAREOS® - Backup Archiving REcovery Open Sourced Copyright (C) 2014-2014 Planets Communications B.V. - Copyright (C) 2014-2014 Bareos GmbH & Co. KG + Copyright (C) 2014-2017 Bareos GmbH & Co. 
KG This program is Free Software; you can redistribute it and/or modify it under the terms of version three of the GNU Affero General Public @@ -64,7 +64,7 @@ static struct backend_interface_mapping_t { { B_FIFO_DEV, "fifo" }, { B_TAPE_DEV, "tape" }, { B_GFAPI_DEV, "gfapi" }, - { B_OBJECT_STORE_DEV, "object" }, + { B_DROPLET_DEV, "droplet" }, { B_RADOS_DEV, "rados" }, { B_CEPHFS_DEV, "cephfs" }, { B_ELASTO_DEV, "elasto" }, diff --git a/src/stored/stored_conf.c b/src/stored/stored_conf.c index 4676a1847c6..73b581b87c7 100644 --- a/src/stored/stored_conf.c +++ b/src/stored/stored_conf.c @@ -259,7 +259,9 @@ static s_kw dev_types[] = { { "fifo", B_FIFO_DEV }, { "vtl", B_VTL_DEV }, { "gfapi", B_GFAPI_DEV }, - { "object", B_OBJECT_STORE_DEV }, + /* compatibility: object have been renamed to droplet */ + { "object", B_DROPLET_DEV }, + { "droplet", B_DROPLET_DEV }, { "rados", B_RADOS_DEV }, { "cephfs", B_CEPHFS_DEV }, { "elasto", B_ELASTO_DEV }, From 87affd2d24a728a0a2d8050253e4d19d2004177b Mon Sep 17 00:00:00 2001 From: Marco van Wieringen Date: Thu, 9 Nov 2017 17:03:45 +0100 Subject: [PATCH 36/46] build: Order building of plugins and storage backends We now order the build of both plugins and storage backends in Makefile and configure ascending. --- autoconf/configure.in | 59 +++++++++++++++--------------- src/stored/Makefile.in | 12 +++---- src/stored/backends/Makefile.in | 64 ++++++++++++++++----------------- 3 files changed, 66 insertions(+), 69 deletions(-) diff --git a/autoconf/configure.in b/autoconf/configure.in index 354c39a44d5..23d831e891c 100644 --- a/autoconf/configure.in +++ b/autoconf/configure.in @@ -4503,6 +4503,20 @@ DEBIAN_CONTROL_DIRECTOR_PYTHON_PLUGIN=/dev/null dnl build a list of plugins we need to build. if test x$use_libtool != xno; then + if test X"$have_zlib" = "Xyes" -o \ + X"$have_lzo" = "Xyes" -o \ + X"$have_fastlz" = "Xyes" ; then + BUILD_SD_PLUGINS="${BUILD_SD_PLUGINS} autoxflate-sd.la" + fi + + if test X"$have_cephfs" = "Xyes" ; then + BUILD_FD_PLUGINS="${BUILD_FD_PLUGINS} cephfs-fd.la" + fi + + if test X"$have_glusterfs" = "Xyes" ; then + BUILD_FD_PLUGINS="${BUILD_FD_PLUGINS} gfapi-fd.la" + fi + if test X"$support_python" = "Xyes" ; then BUILD_FD_PLUGINS="${BUILD_FD_PLUGINS} python-fd.la" BUILD_SD_PLUGINS="${BUILD_SD_PLUGINS} python-sd.la" @@ -4512,29 +4526,15 @@ if test x$use_libtool != xno; then DEBIAN_CONTROL_DIRECTOR_PYTHON_PLUGIN=./debian/control.bareos-director-python-plugin fi - if test X"$have_scsi_crypto" = "Xyes" ; then - BUILD_SD_PLUGINS="${BUILD_SD_PLUGINS} scsicrypto-sd.la scsitapealert-sd.la" + if test X"$have_ceph_rados" = "Xyes" ; then + BUILD_FD_PLUGINS="${BUILD_FD_PLUGINS} rados-fd.la" fi - if test X"$have_zlib" = "Xyes" -o \ - X"$have_lzo" = "Xyes" -o \ - X"$have_fastlz" = "Xyes" ; then - BUILD_SD_PLUGINS="${BUILD_SD_PLUGINS} autoxflate-sd.la" + if test X"$have_scsi_crypto" = "Xyes" ; then + BUILD_SD_PLUGINS="${BUILD_SD_PLUGINS} scsicrypto-sd.la scsitapealert-sd.la" fi fi -if test X"$have_ceph_rados" = "Xyes" ; then - BUILD_FD_PLUGINS="${BUILD_FD_PLUGINS} rados-fd.la" -fi - -if test X"$have_glusterfs" = "Xyes" ; then - BUILD_FD_PLUGINS="${BUILD_FD_PLUGINS} gfapi-fd.la" -fi - -if test X"$have_cephfs" = "Xyes" ; then - BUILD_FD_PLUGINS="${BUILD_FD_PLUGINS} cephfs-fd.la" -fi - AC_SUBST(BUILD_FD_PLUGINS) AC_SUBST(BUILD_SD_PLUGINS) AC_SUBST(BUILD_DIR_PLUGINS) @@ -4545,12 +4545,8 @@ AC_SUBST_FILE(DEBIAN_CONTROL_DIRECTOR_PYTHON_PLUGIN) dnl build a list of storage backends we need to build. 
BUILD_SD_BACKENDS="" if test x$use_libtool != xno; then - BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-fifo.la" - BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-gentape.la" - BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-tape.la" - - if test X"$have_glusterfs" = "Xyes" ; then - BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-gfapi.la" + if test X"$have_cephfs" = "Xyes" ; then + BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-cephfs.la" fi if test X"$have_droplet" = "Xyes" ; then @@ -4558,17 +4554,18 @@ if test x$use_libtool != xno; then BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-droplet.la" fi - if test X"$have_ceph_rados" = "Xyes" ; then - BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-rados.la" + if test X"$have_elasto" = "Xyes" ; then + BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-elasto.la" fi - if test X"$have_cephfs" = "Xyes" ; then - BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-cephfs.la" - fi + BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-fifo.la" + BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-gentape.la" - if test X"$have_elasto" = "Xyes" ; then - BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-elasto.la" + if test X"$have_ceph_rados" = "Xyes" ; then + BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-rados.la" fi + + BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-tape.la" fi AC_SUBST(BUILD_SD_BACKENDS) diff --git a/src/stored/Makefile.in b/src/stored/Makefile.in index a225524c9d5..20ffe88af97 100644 --- a/src/stored/Makefile.in +++ b/src/stored/Makefile.in @@ -29,11 +29,11 @@ dummy: AVAILABLE_DEVICE_API_SRCS = cephfs_device.c \ chunked_device.c \ + droplet_device.c \ elasto_device.c \ + generic_tape_device.c \ gfapi_device.c \ - droplet_device.c \ rados_device.c \ - generic_tape_device.c \ unix_fifo_device.c \ unix_tape_device.c NEEDED_DEVICE_API_SRCS = unix_file_device.c @NEEDED_DEVICE_API_SRCS@ @@ -140,6 +140,10 @@ cephfs_device.lo: cephfs_device.c @echo "Compiling $<" $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(CEPHFS_INC) -c $(WCFLAGS) $(CPPFLAGS) $(INCLUDES) $(DINCLUDE) $(CXXFLAGS) $< +droplet_device.lo: droplet_device.c + @echo "Compiling $<" + $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(DROPLET_INC) -c $(WCFLAGS) $(CPPFLAGS) $(INCLUDES) $(DINCLUDE) $(CXXFLAGS) $< + elasto_device.lo: elasto_device.c @echo "Compiling $<" $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(ELASTO_INC) -c $(WCFLAGS) $(CPPFLAGS) $(INCLUDES) $(DINCLUDE) $(CXXFLAGS) $< @@ -148,10 +152,6 @@ gfapi_device.lo: gfapi_device.c @echo "Compiling $<" $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(GLUSTER_INC) -c $(WCFLAGS) $(CPPFLAGS) $(INCLUDES) $(DINCLUDE) $(CXXFLAGS) $< -droplet_device.lo: droplet_device.c - @echo "Compiling $<" - $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(DROPLET_INC) -c $(WCFLAGS) $(CPPFLAGS) $(INCLUDES) $(DINCLUDE) $(CXXFLAGS) $< - rados_device.lo: rados_device.c @echo "Compiling $<" $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) $(RADOS_INC) -c $(WCFLAGS) $(CPPFLAGS) $(INCLUDES) $(DINCLUDE) $(CXXFLAGS) $< diff --git a/src/stored/backends/Makefile.in b/src/stored/backends/Makefile.in index 2222ad8d9c2..d714a0517bb 100644 --- a/src/stored/backends/Makefile.in +++ b/src/stored/backends/Makefile.in @@ -35,24 +35,24 @@ CHEPHFS_LOBJS = $(CHEPHFS_SRCS:.c=.lo) CHUNKED_SRCS = chunked_device.c CHUNKED_LOBJS = $(CHUNKED_SRCS:.c=.lo) +DROPLET_SRCS = droplet_device.c +DROPLET_LOBJS = $(DROPLET_SRCS:.c=.lo) + ELASTO_SRCS = elasto_device.c ELASTO_LOBJS = 
$(ELASTO_SRCS:.c=.lo) -GFAPI_SRCS = gfapi_device.c -GFAPI_LOBJS = $(GFAPI_SRCS:.c=.lo) - -OBJECT_SRCS = droplet_device.c -OBJECT_LOBJS = $(OBJECT_SRCS:.c=.lo) - -RADOS_SRCS = rados_device.c -RADOS_LOBJS = $(RADOS_SRCS:.c=.lo) - FIFO_SRCS = unix_fifo_device.c FIFO_LOBJS = $(FIFO_SRCS:.c=.lo) GEN_TAPE_SRCS = generic_tape_device.c GEN_TAPE_LOBJS = $(GEN_TAPE_SRCS:.c=.lo) +GFAPI_SRCS = gfapi_device.c +GFAPI_LOBJS = $(GFAPI_SRCS:.c=.lo) + +RADOS_SRCS = rados_device.c +RADOS_LOBJS = $(RADOS_SRCS:.c=.lo) + TAPE_SRCS = unix_tape_device.c TAPE_LOBJS = $(TAPE_SRCS:.c=.lo) @@ -80,24 +80,24 @@ STORED_RESTYPES = autochanger device director ndmp messages storage $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) $(INCLUDES) $(DINCLUDE) $(CXXFLAGS) $< if [ -d "$(@:.lo=.d)" ]; then $(MKDIR) $(CONF_EXTRA_DIR); $(CP) -r $(@:.lo=.d)/. $(CONF_EXTRA_DIR)/.; fi -$(ELASTO_LOBJS): +$(CHEPHFS_LOBJS): @echo "Compiling $(@:.lo=.c)" - $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) $(INCLUDES) $(ELASTO_INC) $(DINCLUDE) $(CXXFLAGS) $(@:.lo=.c) + $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) $(INCLUDES) $(CEPHFS_INC) $(DINCLUDE) $(CXXFLAGS) $(@:.lo=.c) if [ -d "$(@:.lo=.d)" ]; then $(MKDIR) $(CONF_EXTRA_DIR); $(CP) -r $(@:.lo=.d)/. $(CONF_EXTRA_DIR)/.; fi -$(CHEPHFS_LOBJS): +$(DROPLET_LOBJS): @echo "Compiling $(@:.lo=.c)" - $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) $(INCLUDES) $(CEPHFS_INC) $(DINCLUDE) $(CXXFLAGS) $(@:.lo=.c) + $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) $(INCLUDES) $(DROPLET_INC) $(DINCLUDE) $(CXXFLAGS) $(@:.lo=.c) if [ -d "$(@:.lo=.d)" ]; then $(MKDIR) $(CONF_EXTRA_DIR); $(CP) -r $(@:.lo=.d)/. $(CONF_EXTRA_DIR)/.; fi -$(GFAPI_LOBJS): +$(ELASTO_LOBJS): @echo "Compiling $(@:.lo=.c)" - $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) $(INCLUDES) $(GLUSTER_INC) $(DINCLUDE) $(CXXFLAGS) $(@:.lo=.c) + $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) $(INCLUDES) $(ELASTO_INC) $(DINCLUDE) $(CXXFLAGS) $(@:.lo=.c) if [ -d "$(@:.lo=.d)" ]; then $(MKDIR) $(CONF_EXTRA_DIR); $(CP) -r $(@:.lo=.d)/. $(CONF_EXTRA_DIR)/.; fi -$(OBJECT_LOBJS): +$(GFAPI_LOBJS): @echo "Compiling $(@:.lo=.c)" - $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) $(INCLUDES) $(DROPLET_INC) $(DINCLUDE) $(CXXFLAGS) $(@:.lo=.c) + $(NO_ECHO)$(LIBTOOL_COMPILE) $(CXX) $(DEFS) $(DEBUG) -c $(WCFLAGS) $(CPPFLAGS) $(INCLUDES) $(GLUSTER_INC) $(DINCLUDE) $(CXXFLAGS) $(@:.lo=.c) if [ -d "$(@:.lo=.d)" ]; then $(MKDIR) $(CONF_EXTRA_DIR); $(CP) -r $(@:.lo=.d)/. $(CONF_EXTRA_DIR)/.; fi $(RADOS_LOBJS): @@ -120,25 +120,15 @@ libbareossd-chunked.la: Makefile $(CHUNKED_LOBJS) $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -L../../lib -o $@ $(CHUNKED_LOBJS) -export-dynamic -rpath $(backenddir) -release $(LIBBAREOSSD_LT_RELEASE) \ -soname libbareossd-chunked-$(LIBBAREOSSD_LT_RELEASE).so -lbareos -libbareossd-elasto.la: Makefile $(ELASTO_LOBJS) - @echo "Making $@ ..." - $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -L../../lib -o $@ $(ELASTO_LOBJS) -export-dynamic -rpath $(backenddir) -release $(LIBBAREOSSD_LT_RELEASE) \ - -soname libbareossd-elasto-$(LIBBAREOSSD_LT_RELEASE).so $(ELASTO_LIBS) -lbareos - -libbareossd-gfapi.la: Makefile $(GFAPI_LOBJS) - @echo "Making $@ ..." 
- $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -L../../lib -o $@ $(GFAPI_LOBJS) -export-dynamic -rpath $(backenddir) -release $(LIBBAREOSSD_LT_RELEASE) \ - -soname libbareossd-gfapi-$(LIBBAREOSSD_LT_RELEASE).so $(GLUSTER_LIBS) -lbareos - -libbareossd-droplet.la: Makefile libbareossd-chunked.la $(OBJECT_LOBJS) +libbareossd-droplet.la: Makefile libbareossd-chunked.la $(DROPLET_LOBJS) @echo "Making $@ ..." - $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -L../../lib -o $@ $(OBJECT_LOBJS) -export-dynamic -rpath $(backenddir) -release $(LIBBAREOSSD_LT_RELEASE) \ + $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -L../../lib -o $@ $(DROPLET_LOBJS) -export-dynamic -rpath $(backenddir) -release $(LIBBAREOSSD_LT_RELEASE) \ -soname libbareossd-droplet-$(LIBBAREOSSD_LT_RELEASE).so $(DROPLET_LIBS) libbareossd-chunked.la -lbareos -libbareossd-rados.la: Makefile $(RADOS_LOBJS) +libbareossd-elasto.la: Makefile $(ELASTO_LOBJS) @echo "Making $@ ..." - $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -L../../lib -o $@ $(RADOS_LOBJS) -export-dynamic -rpath $(backenddir) -release $(LIBBAREOSSD_LT_RELEASE) \ - -soname libbareossd-rados-$(LIBBAREOSSD_LT_RELEASE).so $(RADOS_LIBS) -lbareos + $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -L../../lib -o $@ $(ELASTO_LOBJS) -export-dynamic -rpath $(backenddir) -release $(LIBBAREOSSD_LT_RELEASE) \ + -soname libbareossd-elasto-$(LIBBAREOSSD_LT_RELEASE).so $(ELASTO_LIBS) -lbareos libbareossd-fifo.la: Makefile $(FIFO_LOBJS) @echo "Making $@ ..." @@ -150,6 +140,16 @@ libbareossd-gentape.la: Makefile $(GEN_TAPE_LOBJS) $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -L../../lib -o $@ $(GEN_TAPE_LOBJS) -export-dynamic -rpath $(backenddir) -release $(LIBBAREOSSD_LT_RELEASE) \ -soname libbareossd-gentape-$(LIBBAREOSSD_LT_RELEASE).so -lbareos +libbareossd-gfapi.la: Makefile $(GFAPI_LOBJS) + @echo "Making $@ ..." + $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -L../../lib -o $@ $(GFAPI_LOBJS) -export-dynamic -rpath $(backenddir) -release $(LIBBAREOSSD_LT_RELEASE) \ + -soname libbareossd-gfapi-$(LIBBAREOSSD_LT_RELEASE).so $(GLUSTER_LIBS) -lbareos + +libbareossd-rados.la: Makefile $(RADOS_LOBJS) + @echo "Making $@ ..." + $(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -L../../lib -o $@ $(RADOS_LOBJS) -export-dynamic -rpath $(backenddir) -release $(LIBBAREOSSD_LT_RELEASE) \ + -soname libbareossd-rados-$(LIBBAREOSSD_LT_RELEASE).so $(RADOS_LIBS) -lbareos + libbareossd-tape.la: Makefile libbareossd-gentape.la $(TAPE_LOBJS) @echo "Making $@ ..." 
$(LIBTOOL_LINK) $(CXX) $(DEFS) $(DEBUG) $(LDFLAGS) -L../../lib -o $@ $(TAPE_LOBJS) -export-dynamic -rpath $(backenddir) -release $(LIBBAREOSSD_LT_RELEASE) \ From 04f4d9287bcd4d367392448932e3d13c1ed56d98 Mon Sep 17 00:00:00 2001 From: Marco van Wieringen Date: Thu, 9 Nov 2017 17:04:24 +0100 Subject: [PATCH 37/46] build: Rebuild configure --- configure | 75 ++++++++++++++++++++++--------------------------------- 1 file changed, 30 insertions(+), 45 deletions(-) diff --git a/configure b/configure index 6827b1dca18..c5a3297764a 100755 --- a/configure +++ b/configure @@ -963,7 +963,6 @@ infodir docdir oldincludedir includedir -runstatedir localstatedir sharedstatedir sysconfdir @@ -1151,7 +1150,6 @@ datadir='${datarootdir}' sysconfdir='${prefix}/etc' sharedstatedir='${prefix}/com' localstatedir='${prefix}/var' -runstatedir='${localstatedir}/run' includedir='${prefix}/include' oldincludedir='/usr/include' docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' @@ -1404,15 +1402,6 @@ do | -silent | --silent | --silen | --sile | --sil) silent=yes ;; - -runstatedir | --runstatedir | --runstatedi | --runstated \ - | --runstate | --runstat | --runsta | --runst | --runs \ - | --run | --ru | --r) - ac_prev=runstatedir ;; - -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \ - | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \ - | --run=* | --ru=* | --r=*) - runstatedir=$ac_optarg ;; - -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) ac_prev=sbindir ;; -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ @@ -1550,7 +1539,7 @@ fi for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ datadir sysconfdir sharedstatedir localstatedir includedir \ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ - libdir localedir mandir runstatedir + libdir localedir mandir do eval ac_val=\$$ac_var # Remove trailing slashes. 
@@ -1703,7 +1692,6 @@ Fine tuning of the installation directories: --sysconfdir=DIR read-only single-machine data [PREFIX/etc] --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] --localstatedir=DIR modifiable single-machine data [PREFIX/var] - --runstatedir=DIR modifiable per-process data [LOCALSTATEDIR/run] --libdir=DIR object code libraries [EPREFIX/lib] --includedir=DIR C header files [PREFIX/include] --oldincludedir=DIR C header files for non-gcc [/usr/include] @@ -33777,6 +33765,20 @@ DEBIAN_CONTROL_STORAGE_PYTHON_PLUGIN=/dev/null DEBIAN_CONTROL_DIRECTOR_PYTHON_PLUGIN=/dev/null if test x$use_libtool != xno; then + if test X"$have_zlib" = "Xyes" -o \ + X"$have_lzo" = "Xyes" -o \ + X"$have_fastlz" = "Xyes" ; then + BUILD_SD_PLUGINS="${BUILD_SD_PLUGINS} autoxflate-sd.la" + fi + + if test X"$have_cephfs" = "Xyes" ; then + BUILD_FD_PLUGINS="${BUILD_FD_PLUGINS} cephfs-fd.la" + fi + + if test X"$have_glusterfs" = "Xyes" ; then + BUILD_FD_PLUGINS="${BUILD_FD_PLUGINS} gfapi-fd.la" + fi + if test X"$support_python" = "Xyes" ; then BUILD_FD_PLUGINS="${BUILD_FD_PLUGINS} python-fd.la" BUILD_SD_PLUGINS="${BUILD_SD_PLUGINS} python-sd.la" @@ -33786,29 +33788,15 @@ if test x$use_libtool != xno; then DEBIAN_CONTROL_DIRECTOR_PYTHON_PLUGIN=./debian/control.bareos-director-python-plugin fi - if test X"$have_scsi_crypto" = "Xyes" ; then - BUILD_SD_PLUGINS="${BUILD_SD_PLUGINS} scsicrypto-sd.la scsitapealert-sd.la" + if test X"$have_ceph_rados" = "Xyes" ; then + BUILD_FD_PLUGINS="${BUILD_FD_PLUGINS} rados-fd.la" fi - if test X"$have_zlib" = "Xyes" -o \ - X"$have_lzo" = "Xyes" -o \ - X"$have_fastlz" = "Xyes" ; then - BUILD_SD_PLUGINS="${BUILD_SD_PLUGINS} autoxflate-sd.la" + if test X"$have_scsi_crypto" = "Xyes" ; then + BUILD_SD_PLUGINS="${BUILD_SD_PLUGINS} scsicrypto-sd.la scsitapealert-sd.la" fi fi -if test X"$have_ceph_rados" = "Xyes" ; then - BUILD_FD_PLUGINS="${BUILD_FD_PLUGINS} rados-fd.la" -fi - -if test X"$have_glusterfs" = "Xyes" ; then - BUILD_FD_PLUGINS="${BUILD_FD_PLUGINS} gfapi-fd.la" -fi - -if test X"$have_cephfs" = "Xyes" ; then - BUILD_FD_PLUGINS="${BUILD_FD_PLUGINS} cephfs-fd.la" -fi - @@ -33818,30 +33806,27 @@ fi BUILD_SD_BACKENDS="" if test x$use_libtool != xno; then - BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-fifo.la" - BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-gentape.la" - BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-tape.la" - - if test X"$have_glusterfs" = "Xyes" ; then - BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-gfapi.la" + if test X"$have_cephfs" = "Xyes" ; then + BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-cephfs.la" fi if test X"$have_droplet" = "Xyes" ; then BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-chunked.la" - BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-object.la" + BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-droplet.la" fi - if test X"$have_ceph_rados" = "Xyes" ; then - BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-rados.la" + if test X"$have_elasto" = "Xyes" ; then + BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-elasto.la" fi - if test X"$have_cephfs" = "Xyes" ; then - BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-cephfs.la" - fi + BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-fifo.la" + BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-gentape.la" - if test X"$have_elasto" = "Xyes" ; then - BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-elasto.la" + if test X"$have_ceph_rados" = "Xyes" ; then + BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} 
libbareossd-rados.la" fi + + BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-tape.la" fi From 09264f494587e556a03177e19688da174e3518d9 Mon Sep 17 00:00:00 2001 From: Marco van Wieringen Date: Thu, 9 Nov 2017 18:42:52 +0100 Subject: [PATCH 38/46] droplet: Remove some more object naming. Now that the driver has been renamed to droplet_device instead of object_store_device we should also rename some more variables and function names to no longer use the object keyword. --- src/stored/backends/droplet_device.c | 28 ++++++++++++++-------------- src/stored/backends/droplet_device.h | 4 ++-- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/src/stored/backends/droplet_device.c b/src/stored/backends/droplet_device.c index 50d108ccc00..b6a3faa9ef0 100644 --- a/src/stored/backends/droplet_device.c +++ b/src/stored/backends/droplet_device.c @@ -85,7 +85,7 @@ static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER; /* * Generic log function that glues libdroplet with BAREOS. */ -static void object_store_logfunc(dpl_ctx_t *ctx, dpl_log_level_t level, const char *message) +static void droplet_device_logfunc(dpl_ctx_t *ctx, dpl_log_level_t level, const char *message) { switch (level) { case DPL_DEBUG: @@ -526,7 +526,7 @@ bool droplet_device::initialize() */ P(mutex); if (droplet_reference_count == 0) { - dpl_set_log_func(object_store_logfunc); + dpl_set_log_func(droplet_device_logfunc); status = dpl_init(); switch (status) { @@ -540,7 +540,7 @@ bool droplet_device::initialize() droplet_reference_count++; V(mutex); - if (!m_object_configstring) { + if (!m_configstring) { int len; bool done; uint64_t value; @@ -552,9 +552,9 @@ bool droplet_device::initialize() return -1; } - m_object_configstring = bstrdup(dev_options); + m_configstring = bstrdup(dev_options); - bp = m_object_configstring; + bp = m_configstring; while (bp) { next_option = strchr(bp, ','); if (next_option) { @@ -596,7 +596,7 @@ bool droplet_device::initialize() done = true; break; case argument_bucket: - m_object_bucketname = bp + device_options[i].compare_size; + m_bucketname = bp + device_options[i].compare_size; done = true; break; case argument_chunksize: @@ -743,8 +743,8 @@ bool droplet_device::initialize() /* * If a bucketname was defined set it in the context. */ - if (m_object_bucketname) { - m_ctx->cur_bucket = bstrdup(m_object_bucketname); + if (m_bucketname) { + m_ctx->cur_bucket = bstrdup(m_bucketname); } } @@ -793,7 +793,7 @@ int droplet_device::d_ioctl(int fd, ioctl_req_t request, char *op) } /* - * Open a directory on the object store and find out size information for a volume. + * Open a directory on the backing store and find out size information for a volume. 
*/ ssize_t droplet_device::chunked_remote_volume_size() { @@ -897,7 +897,7 @@ bool droplet_device::d_truncate(DCR *dcr) droplet_device::~droplet_device() { if (m_ctx) { - if (m_object_bucketname && m_ctx->cur_bucket) { + if (m_bucketname && m_ctx->cur_bucket) { free(m_ctx->cur_bucket); m_ctx->cur_bucket = NULL; } @@ -905,8 +905,8 @@ droplet_device::~droplet_device() m_ctx = NULL; } - if (m_object_configstring) { - free(m_object_configstring); + if (m_configstring) { + free(m_configstring); } P(mutex); @@ -919,8 +919,8 @@ droplet_device::~droplet_device() droplet_device::droplet_device() { - m_object_configstring = NULL; - m_object_bucketname = NULL; + m_configstring = NULL; + m_bucketname = NULL; m_location = NULL; m_canned_acl = NULL; m_storage_class = NULL; diff --git a/src/stored/backends/droplet_device.h b/src/stored/backends/droplet_device.h index 92c0f955abe..7e1b25df3a0 100644 --- a/src/stored/backends/droplet_device.h +++ b/src/stored/backends/droplet_device.h @@ -36,12 +36,12 @@ class droplet_device: public chunked_device { /* * Private Members */ - char *m_object_configstring; + char *m_configstring; const char *m_profile; const char *m_location; const char *m_canned_acl; const char *m_storage_class; - const char *m_object_bucketname; + const char *m_bucketname; dpl_ctx_t *m_ctx; dpl_sysmd_t m_sysmd; From d1b84e79e6eb9c4a08fd75f54c80bfa849456961 Mon Sep 17 00:00:00 2001 From: Marco van Wieringen Date: Mon, 11 Dec 2017 13:21:18 +0100 Subject: [PATCH 39/46] lib: Fix layout. --- src/lib/edit.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/lib/edit.c b/src/lib/edit.c index 71bcca03d91..4869d726f62 100644 --- a/src/lib/edit.c +++ b/src/lib/edit.c @@ -365,8 +365,8 @@ char *edit_utime(utime_t val, char *buf, int buf_len) char mybuf[200]; static const int32_t mult[] = { 60 * 60 * 24 * 365, - 60 *60 * 24 *30, - 60 *60 * 24, + 60 * 60 * 24 * 30, + 60 * 60 * 24, 60 * 60, 60 }; From 4b5746e60ca39f4431cc0a4ceda98081325e286a Mon Sep 17 00:00:00 2001 From: Philipp Storz Date: Sat, 16 Dec 2017 09:20:41 +0100 Subject: [PATCH 40/46] Fix typo netowkr -> network --- src/dird/dird_conf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/dird/dird_conf.c b/src/dird/dird_conf.c index a7a30bc7632..658b1c2c2ee 100644 --- a/src/dird/dird_conf.c +++ b/src/dird/dird_conf.c @@ -224,7 +224,7 @@ static RES_ITEM cli_items[] = { { "FdPassword", CFG_TYPE_AUTOPASSWORD, ITEM(res_client.password), 0, CFG_ITEM_ALIAS, NULL, NULL, NULL }, { "Catalog", CFG_TYPE_RES, ITEM(res_client.catalog), R_CATALOG, 0, NULL, NULL, NULL }, { "Passive", CFG_TYPE_BOOL, ITEM(res_client.passive), 0, CFG_ITEM_DEFAULT, "false", "13.2.0-", - "If enabled, the Storage Daemon will initiate the network connection to the Client. If disabled, the Client will initiate the netowrk connection to the Storage Daemon." }, + "If enabled, the Storage Daemon will initiate the network connection to the Client. If disabled, the Client will initiate the network connection to the Storage Daemon." }, { "ConnectionFromDirectorToClient", CFG_TYPE_BOOL, ITEM(res_client.conn_from_dir_to_fd), 0, CFG_ITEM_DEFAULT, "true", "16.2.2", "Let the Director initiate the network connection to the Client." 
}, { "AllowClientConnect", CFG_TYPE_BOOL, ITEM(res_client.conn_from_fd_to_dir), 0, CFG_ITEM_DEPRECATED | CFG_ITEM_ALIAS, NULL, NULL, From 74c9ed414769c63174741a23227d8f90b241bdc3 Mon Sep 17 00:00:00 2001 From: Joerg Steffens Date: Mon, 18 Dec 2017 13:35:08 +0100 Subject: [PATCH 41/46] readded building of libbareossd-gfapi --- autoconf/configure.in | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/autoconf/configure.in b/autoconf/configure.in index d48bd220972..01b8914df98 100644 --- a/autoconf/configure.in +++ b/autoconf/configure.in @@ -4561,6 +4561,10 @@ if test x$use_libtool != xno; then BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-fifo.la" BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-gentape.la" + if test X"$have_glusterfs" = "Xyes" ; then + BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-gfapi.la" + fi + if test X"$have_ceph_rados" = "Xyes" ; then BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-rados.la" fi From 9b21f44792a6fb2af7cd24facd78470b9edc119b Mon Sep 17 00:00:00 2001 From: Joerg Steffens Date: Mon, 18 Dec 2017 16:07:28 +0100 Subject: [PATCH 42/46] configure --- configure | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/configure b/configure index d80bc94ee34..acaa514bfdc 100755 --- a/configure +++ b/configure @@ -963,6 +963,7 @@ infodir docdir oldincludedir includedir +runstatedir localstatedir sharedstatedir sysconfdir @@ -1150,6 +1151,7 @@ datadir='${datarootdir}' sysconfdir='${prefix}/etc' sharedstatedir='${prefix}/com' localstatedir='${prefix}/var' +runstatedir='${localstatedir}/run' includedir='${prefix}/include' oldincludedir='/usr/include' docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' @@ -1402,6 +1404,15 @@ do | -silent | --silent | --silen | --sile | --sil) silent=yes ;; + -runstatedir | --runstatedir | --runstatedi | --runstated \ + | --runstate | --runstat | --runsta | --runst | --runs \ + | --run | --ru | --r) + ac_prev=runstatedir ;; + -runstatedir=* | --runstatedir=* | --runstatedi=* | --runstated=* \ + | --runstate=* | --runstat=* | --runsta=* | --runst=* | --runs=* \ + | --run=* | --ru=* | --r=*) + runstatedir=$ac_optarg ;; + -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) ac_prev=sbindir ;; -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ @@ -1539,7 +1550,7 @@ fi for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ datadir sysconfdir sharedstatedir localstatedir includedir \ oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ - libdir localedir mandir + libdir localedir mandir runstatedir do eval ac_val=\$$ac_var # Remove trailing slashes. 
@@ -1692,6 +1703,7 @@ Fine tuning of the installation directories: --sysconfdir=DIR read-only single-machine data [PREFIX/etc] --sharedstatedir=DIR modifiable architecture-independent data [PREFIX/com] --localstatedir=DIR modifiable single-machine data [PREFIX/var] + --runstatedir=DIR modifiable per-process data [LOCALSTATEDIR/run] --libdir=DIR object code libraries [EPREFIX/lib] --includedir=DIR C header files [PREFIX/include] --oldincludedir=DIR C header files for non-gcc [/usr/include] @@ -33822,6 +33834,10 @@ if test x$use_libtool != xno; then BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-fifo.la" BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-gentape.la" + if test X"$have_glusterfs" = "Xyes" ; then + BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-gfapi.la" + fi + if test X"$have_ceph_rados" = "Xyes" ; then BUILD_SD_BACKENDS="${BUILD_SD_BACKENDS} libbareossd-rados.la" fi From 8b832476f364932f7aad7f026fbb24aab5eea3e4 Mon Sep 17 00:00:00 2001 From: ProxyManagedServices Date: Mon, 18 Dec 2017 13:16:36 +0100 Subject: [PATCH 43/46] Created the initial README.droplet --- README.droplet | 80 ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) create mode 100644 README.droplet diff --git a/README.droplet b/README.droplet new file mode 100644 index 00000000000..1e0050b68a2 --- /dev/null +++ b/README.droplet @@ -0,0 +1,80 @@ +Using droplet S3 as a backing store for backups. + +The droplet S3 storage backend writes chunks of data into an S3 bucket. + +For this you need to install the libdroplet-devel and the storage-droplet packages, which contain +the libbareossd-chunked*.so and libbareossd-droplet*.so shared objects and the droplet storage backend, which is implemented as a dynamically loaded +storage backend. + +In the following example all the backup data is placed in the "backup-bareos" bucket on the defined S3 storage. +A Volume is a sub-directory in the defined bucket, and every chunk is placed in the Volume directory with the filename 0000-9999 and a size +that is defined in the chunksize. + +The droplet S3 backend can only be used with virtual-hosted-style buckets like http://./object +Path-style buckets are not supported when using the droplet S3 backend. + +On the Storage Daemon the following configuration is needed. +Example bareos-sd.d/device file: + +Device { + Name = "S3_1-00" + Media Type = "S3_File_1" + Archive Device = Object S3 Storage + # + # Config options: + # profile= - Droplet profile to use either absolute PATH or logical name (e.g. ~/.droplet/.profile) + # location= - AWS location (e.g. us-east etc.) + # acl= - Canned ACL + # storageclass - Storage Class to use. + # bucket= - Bucket to store objects in. + # chunksize= - Size of Volume Chunks (default = 10 MB) + # iothreads= - Number of IO-threads to use for upload (use blocking uploads if not defined.) + # ioslots= - Number of IO-slots per IO-thread (default 10) + # mmap - Use mmap to allocate Chunk memory instead of malloc(). + # + Device Options = "profile=/etc/bareos/bareos-sd.d/.droplet/droplet.profile,bucket=backup-bareos,iothreads=3,ioslots=3,chunksize=100M" + Device Type = droplet + LabelMedia = yes # lets Bareos label unlabeled media + Random Access = yes + AutomaticMount = yes # when device opened, read it + RemovableMedia = no + AlwaysOpen = no + Description = "Object S3 device. A connecting Director must have the same Name and MediaType."
+ Maximum File Size = 500M # 500 MB (Allows for seeking to small portions of the Volume) + Maximum Concurrent Jobs = 1 + Maximum Spool Size = 15000M +} + + +The droplet.profile file holds the credentials for S3 storage. +Example /etc/bareos/bareos-sd.d/.droplet/droplet.profile file: + +Make sure the file is only readable by bareos, as credentials for S3 are listed here. + +Config options for the profile: + +use_https = True +host = +access_key = +secret_key = +pricing_dir = "" +backend = s3 +aws_auth_sign_version = 2 + +If the pricing_dir is not empty, it will create an /droplet.csv file which +will record all S3 operations. +See the 'libdroplet/src/pricing.c' code for an explanation. + +The parameter "aws_auth_sign_version = 2" is for the connection to CEPH. +For use with AWS S3, the aws_auth_sign_version must be set to "4". + +On the Director you connect to the Storage Device with the following configuration. +Example bareos-dir.d/storage file: + +Storage { + Name = S3_1-00 + Address = "Replace this by the Bareos Storage Daemon FQDN or IP address" + Password = "Replace this by the Bareos Storage Daemon director password" + Device = S3_1-00 + Media Type = S3_File_1 +} From c785cd662af0cdaa5b3ba618d93b98a6e8228f36 Mon Sep 17 00:00:00 2001 From: Joerg Steffens Date: Sun, 17 Dec 2017 20:23:20 +0100 Subject: [PATCH 44/46] travis: fix --- test/travis_before_install.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/test/travis_before_install.sh b/test/travis_before_install.sh index bca62bd2811..4bc822143a0 100755 --- a/test/travis_before_install.sh +++ b/test/travis_before_install.sh @@ -7,4 +7,5 @@ sed -e "s/^.*:.*:\s//" -e "s/\s([^)]*)//g" /tmp/dpkg-builddeps > /tmp/build_depe echo "additional packages required for building:"; cat /tmp/build_depends sudo xargs --arg-file /tmp/build_depends apt-get -q --assume-yes install fakeroot dpkg -l +true From 79c05582af85a4be383e0e6c219b993616ba0f7b Mon Sep 17 00:00:00 2001 From: Joerg Steffens Date: Wed, 7 Feb 2018 19:55:48 +0100 Subject: [PATCH 45/46] Univention: store automatically generated passwords Fixes #850: Univention: password of automatically created client changes --- platforms/univention/univention-bareos.py | 36 ++++++++++++++++++++--- 1 file changed, 32 insertions(+), 4 deletions(-) diff --git a/platforms/univention/univention-bareos.py b/platforms/univention/univention-bareos.py index c29b085431f..c550bcb2121 100644 --- a/platforms/univention/univention-bareos.py +++ b/platforms/univention/univention-bareos.py @@ -1,8 +1,7 @@ # -*- coding: utf-8 -*- # -# Univention SSL """Bareos Client Configuration Listener Module.""" -# + __package__ = '' # workaround for PEP 366 from listener import configRegistry, setuid, unsetuid @@ -80,6 +79,7 @@ def postrun(): return + def processClient(client_name,entry,delete=False): if client_name==None: return @@ -99,6 +99,8 @@ def processClient(client_name,entry,delete=False): addClient(client_name,client_type) + + def addClient(client_name,client_type): createClientJob(client_name,client_type) addClientInclude(client_name) @@ -110,8 +112,21 @@ def removeClient(client_name,client_type): disableClientJob(client_name,client_type) addClientInclude(client_name) + + def getClientSecret(client_name): - return createClientSecret(client_name) + path=getClientSecretPath(client_name) + password=None + + try: + f=open(path,'r') + password=f.read().strip() + except: + password=createClientSecret(client_name) + + return password + + def exportBareosFdDirectorResource(client_name, client_type): # send
commands via pipe to bconsole @@ -120,11 +135,22 @@ def exportBareosFdDirectorResource(client_name, client_type): out = process.communicate(b'reload\nconfigure export client="{client_name}-fd"\n'.format(client_name=client_name))[0] ud.debug(ud.LISTENER, ud.INFO, "bareos export output:\n" + str(out)) + + def createClientSecret(client_name): + path=getClientSecretPath(client_name) + char_set = string.ascii_uppercase + string.digits + string.ascii_lowercase password=''.join(random.sample(char_set*40,40)) + os.umask(077) + with open(path,'w') as f: + f.write(password) + os.chown(path,-1,0) + return password + + def removeClientJob(client_name): path=JOBS_PATH+'/'+client_name+'.include' os.remove(path) @@ -149,6 +175,9 @@ def disableClientJob(client_name,client_type): def getClientIncludePath(client_name): return '@'+JOBS_PATH+'/'+client_name+'.include' +def getClientSecretPath(client_name): + return JOBS_PATH+'/'+client_name+'.secret' + def addClientInclude(client_name): # is the client already in the include list? if isClientIncluded(client_name): @@ -170,4 +199,3 @@ def isClientIncluded(client_name): if want in l: return True return False - From 0e3298c4eb33c0a1aa8e93421193a1e1ce03c2d6 Mon Sep 17 00:00:00 2001 From: Joerg Steffens Date: Mon, 12 Feb 2018 16:49:11 +0100 Subject: [PATCH 46/46] Build: Bump version number to 17.2.5 --- configure | 18 +++++++++--------- src/include/version.h | 12 ++++++------ 2 files changed, 15 insertions(+), 15 deletions(-) diff --git a/configure b/configure index 04b56e0ed1f..65b245550f3 100755 --- a/configure +++ b/configure @@ -1,6 +1,6 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.69 for bareos 17.2.4. +# Generated by GNU Autoconf 2.69 for bareos 17.2.5. # # # Copyright (C) 1992-1996, 1998-2012 Free Software Foundation, Inc. @@ -587,8 +587,8 @@ MAKEFLAGS= # Identity of this package. PACKAGE_NAME='bareos' PACKAGE_TARNAME='bareos' -PACKAGE_VERSION='17.2.4' -PACKAGE_STRING='bareos 17.2.4' +PACKAGE_VERSION='17.2.5' +PACKAGE_STRING='bareos 17.2.5' PACKAGE_BUGREPORT='' PACKAGE_URL='' @@ -1660,7 +1660,7 @@ if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF -\`configure' configures bareos 17.2.4 to adapt to many kinds of systems. +\`configure' configures bareos 17.2.5 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... @@ -1730,7 +1730,7 @@ fi if test -n "$ac_init_help"; then case $ac_init_help in - short | recursive ) echo "Configuration of bareos 17.2.4:";; + short | recursive ) echo "Configuration of bareos 17.2.5:";; esac cat <<\_ACEOF @@ -1965,7 +1965,7 @@ fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF -bareos configure 17.2.4 +bareos configure 17.2.5 generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. @@ -2920,7 +2920,7 @@ cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. -It was created by bareos $as_me 17.2.4, which was +It was created by bareos $as_me 17.2.5, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ @@ -34365,7 +34365,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. 
ac_log=" -This file was extended by bareos $as_me 17.2.4, which was +This file was extended by bareos $as_me 17.2.5, which was generated by GNU Autoconf 2.69. Invocation command line was CONFIG_FILES = $CONFIG_FILES @@ -34431,7 +34431,7 @@ _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ -bareos config.status 17.2.4 +bareos config.status 17.2.5 configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" diff --git a/src/include/version.h b/src/include/version.h index b42ece25086..05bdbe8fb50 100644 --- a/src/include/version.h +++ b/src/include/version.h @@ -1,19 +1,19 @@ #undef VERSION -#define VERSION "17.2.4" -#define BDATE "21 Sep 2017" -#define LSMDATE "21Sep17" +#define VERSION "17.2.5" +#define BDATE "14 Feb 2018" +#define LSMDATE "14Feb18" #define PROG_COPYRIGHT "Copyright (C) %d-2012 Free Software Foundation Europe e.V.\n" \ "Copyright (C) 2010-2017 Planets Communications B.V.\n" \ - "Copyright (C) 2013-2017 Bareos GmbH & Co. KG\n" -#define BYEAR "2017" /* year for copyright messages in programs */ + "Copyright (C) 2013-2018 Bareos GmbH & Co. KG\n" +#define BYEAR "2018" /* year for copyright messages in programs */ /* BAREOS® - Backup Archiving REcovery Open Sourced Copyright (C) 2000-2013 Free Software Foundation Europe e.V. Copyright (C) 2010-2017 Planets Communications B.V. - Copyright (C) 2013-2017 Bareos GmbH & Co. KG + Copyright (C) 2013-2018 Bareos GmbH & Co. KG This program is Free Software; you can redistribute it and/or modify it under the terms of version three of the GNU Affero General Public