diff --git a/src/dird/dird.c b/src/dird/dird.c
index cf97fc3a3ab..703bc94c106 100644
--- a/src/dird/dird.c
+++ b/src/dird/dird.c
@@ -920,8 +920,16 @@ static bool check_resources()
    /*
     * Loop over Clients
     */
+   me->subscriptions_used = 0;
    CLIENTRES *client;
    foreach_res(client, R_CLIENT) {
+      /*
+       * Count the number of clients.
+       *
+       * Only used as an indication, not an enforced limit.
+       */
+      me->subscriptions_used++;
+
       /*
        * tls_require implies tls_enable
        */
diff --git a/src/dird/dird_conf.c b/src/dird/dird_conf.c
index f6f2058b50c..ca23183000a 100644
--- a/src/dird/dird_conf.c
+++ b/src/dird/dird_conf.c
@@ -121,6 +121,7 @@ static RES_ITEM dir_items[] = {
    { "piddirectory", store_dir, ITEM(res_dir.pid_directory), 0, ITEM_DEFAULT, _PATH_BAREOS_PIDDIR },
    { "plugindirectory", store_dir, ITEM(res_dir.plugin_directory), 0, 0, NULL },
    { "scriptsdirectory", store_dir, ITEM(res_dir.scripts_directory), 0, 0, NULL },
+   { "subscriptions", store_pint32, ITEM(res_dir.subscriptions), 0, ITEM_DEFAULT, "0" },
    { "subsysdirectory", store_dir, ITEM(res_dir.subsys_directory), 0, 0, NULL },
    { "maximumconcurrentjobs", store_pint32, ITEM(res_dir.MaxConcurrentJobs), 0, ITEM_DEFAULT, "1" },
    { "maximumconsoleconnections", store_pint32, ITEM(res_dir.MaxConsoleConnect), 0, ITEM_DEFAULT, "20" },
@@ -707,10 +708,14 @@ void dump_resource(int type, RES *reshdr, void sendit(void *sock, const char *fm
    }
    switch (type) {
    case R_DIRECTOR:
-      sendit(sock, _("Director: name=%s MaxJobs=%d FDtimeout=%s SDtimeout=%s\n"),
-             reshdr->name, res->res_dir.MaxConcurrentJobs,
+      sendit(sock, _("Director: name=%s MaxJobs=%d FDtimeout=%s SDtimeout=%s\n"
+                     " subscriptions=%d, subscriptions_used=%d\n"),
+             reshdr->name,
+             res->res_dir.MaxConcurrentJobs,
              edit_uint64(res->res_dir.FDConnectTimeout, ed1),
-             edit_uint64(res->res_dir.SDConnectTimeout, ed2));
+             edit_uint64(res->res_dir.SDConnectTimeout, ed2),
+             res->res_dir.subscriptions,
+             res->res_dir.subscriptions_used);
       if (res->res_dir.query_file) {
          sendit(sock, _("   query_file=%s\n"), res->res_dir.query_file);
       }
@@ -718,6 +723,7 @@ void dump_resource(int type, RES *reshdr, void sendit(void *sock, const char *fm
         sendit(sock, _("  --> "));
         dump_resource(-R_MSGS, (RES *)res->res_dir.messages, sendit, sock);
      }
+      break;
 
   case R_CONSOLE:
      sendit(sock, _("Console: name=%s SSL=%d\n"),
diff --git a/src/dird/dird_conf.h b/src/dird/dird_conf.h
index 99326af5290..24c9105331d 100644
--- a/src/dird/dird_conf.h
+++ b/src/dird/dird_conf.h
@@ -129,6 +129,8 @@ class DIRRES {
    bool optimize_for_speed;           /* Optimize daemon for speed which may need more memory */
    bool ndmp_snooping;                /* NDMP Protocol specific snooping enabled */
    uint32_t ndmp_loglevel;            /* NDMP Protocol specific loglevel to use */
+   uint32_t subscriptions;            /* Number of subscriptions available */
+   uint32_t subscriptions_used;       /* Number of subscriptions used */
    char *verid;                       /* Custom Id to print in version command */
    char *keyencrkey;                  /* Key Encryption Key */
diff --git a/src/dird/job.c b/src/dird/job.c
index d334f88f617..e99ab5ae89a 100644
--- a/src/dird/job.c
+++ b/src/dird/job.c
@@ -481,6 +481,15 @@ static void *job_thread(void *arg)
          break;
       }
 
+      /*
+       * Check for subscriptions and issue a warning when exceeded.
+       */
+      if (me->subscriptions &&
+          me->subscriptions < me->subscriptions_used) {
+         Jmsg(jcr, M_WARNING, 0, _("Subscriptions exceeded: (used/total) (%d/%d)\n"),
+              me->subscriptions_used, me->subscriptions);
+      }
+
       run_scripts(jcr, jcr->res.job->RunScripts, "AfterJob");
 
       /*
diff --git a/src/dird/ua_cmds.c b/src/dird/ua_cmds.c
index 6debd3f3a1d..127fcc7360c 100644
--- a/src/dird/ua_cmds.c
+++ b/src/dird/ua_cmds.c
@@ -187,7 +187,8 @@ static struct cmdstruct commands[] = {
      "\tcomment= yes"), false },
  { NT_("status"), status_cmd, _("Report status"),
    NT_("all | dir= | director | scheduler | schedule= | client= |\n"
-       "\tstorage= slots | days= | job= | schedule="), true },
+       "\tstorage= slots | days= | job= | schedule= |\n"
+       "\tsubscriptions"), true },
  { NT_("setbandwidth"), setbwlimit_cmd, _("Sets bandwidth"),
    NT_("client= | storage= | jobid= |\n"
        "\tjob= | ujobid= state= | all\n"
diff --git a/src/dird/ua_status.c b/src/dird/ua_status.c
index 26a300b388d..bcc3f1fa861 100644
--- a/src/dird/ua_status.c
+++ b/src/dird/ua_status.c
@@ -36,6 +36,7 @@ static void list_running_jobs(UAContext *ua);
 static void list_terminated_jobs(UAContext *ua);
 static void do_director_status(UAContext *ua);
 static void do_scheduler_status(UAContext *ua);
+static bool do_subscription_status(UAContext *ua);
 static void do_all_status(UAContext *ua);
 static void status_slots(UAContext *ua, STORERES *store);
 static void status_content(UAContext *ua, STORERES *store);
@@ -176,6 +177,12 @@ int status_cmd(UAContext *ua, const char *cmd)
       } else if (bstrncasecmp(ua->argk[i], NT_("sched"), 5)) {
          do_scheduler_status(ua);
          return 1;
+      } else if (bstrncasecmp(ua->argk[i], NT_("sub"), 3)) {
+         if (do_subscription_status(ua)) {
+            return 1;
+         } else {
+            return 0;
+         }
       } else {
          store = get_storage_resource(ua, false/*no default*/);
          if (store) {
@@ -527,6 +534,45 @@ static bool show_scheduled_preview(UAContext *ua, SCHEDRES *sched,
    return true;
 }
 
+/*
+ * Check the number of configured clients against the configured number of subscriptions.
+ *
+ * Returns true when the number of subscriptions used does not exceed the configured
+ * number (or when no limit is configured), otherwise returns false.
+ */
+static bool do_subscription_status(UAContext *ua)
+{
+   int available;
+   bool retval = false;
+
+   /*
+    * See if we need to check.
+    */
+   if (me->subscriptions == 0) {
+      ua->send_msg(_("No subscriptions configured in director.\n"));
+      retval = true;
+      goto bail_out;
+   }
+
+   if (me->subscriptions_used <= 0) {
+      ua->error_msg(_("No clients defined.\n"));
+      goto bail_out;
+   } else {
+      available = me->subscriptions - me->subscriptions_used;
+      if (available < 0) {
+         ua->send_msg(_("Warning! No available subscriptions: %d (%d/%d) (used/total)\n"),
+                      available, me->subscriptions_used, me->subscriptions);
+      } else {
+         ua->send_msg(_("Ok: available subscriptions: %d (%d/%d) (used/total)\n"),
+                      available, me->subscriptions_used, me->subscriptions);
+         retval = true;
+      }
+   }
+
+bail_out:
+   return retval;
+}
+
 static void do_scheduler_status(UAContext *ua)
 {
    int i;
@@ -636,7 +682,7 @@ static void do_scheduler_status(UAContext *ua)
    /*
     * Build an overview.
     */
-   if ( days > 0 ) { /* future */
+   if (days > 0) { /* future */
       start = now;
       stop = now + (days * seconds_per_day);
    } else { /* past */
@@ -858,7 +904,7 @@ static void list_scheduled_jobs(UAContext *ua)
       if (!acl_access_ok(ua, Job_ACL, job->name()) || !job->enabled) {
          continue;
       }
-      for (run=NULL; (run = find_next_run(run, job, runtime, days)); ) {
+      for (run = NULL; (run = find_next_run(run, job, runtime, days)); ) {
          USTORERES store;
          level = job->JobLevel;
          if (run->level) {
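
As a usage sketch (not part of the patch itself): the new "subscriptions" entry in dir_items[] should expose a Subscriptions directive in the Director resource, and the new status keyword can then be queried from bconsole. The resource name and values below are illustrative assumptions, not taken from the patch; other required Director directives are omitted.

   # bareos-dir.conf -- Director resource (illustrative)
   Director {
     Name = backup-dir            # illustrative name
     Subscriptions = 50           # maps to the "subscriptions" entry above; the default 0 disables the check
   }

   # bconsole session (output format taken from do_subscription_status())
   * status subscriptions
   Ok: available subscriptions: 45 (5/50) (used/total)

With Subscriptions left at 0 the command reports that no subscriptions are configured; when the number of configured clients exceeds the limit, job_thread() additionally emits the "Subscriptions exceeded" warning at the end of each job.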