IPVS: Add __ip_vs_control_{init,cleanup}_sysctl()
Break out the portions of __ip_vs_control_init() and
__ip_vs_control_cleanup() that aren't necessary when
CONFIG_SYSCTL is undefined.

Signed-off-by: Simon Horman <horms@verge.net.au>
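
The change applies the usual compile-time stub pattern: the sysctl-only setup and teardown move into dedicated helpers, and when CONFIG_SYSCTL is not set those helpers collapse to empty stubs, so the callers need no #ifdef blocks of their own. A minimal stand-alone sketch of that idiom follows; struct ctx and the feature_init_sysctl()/feature_cleanup_sysctl() names are made up for illustration and are not the kernel functions added by this patch.

#include <stdio.h>

struct ctx {
	int sysctl_registered;
};

#ifdef CONFIG_SYSCTL
/* Full version: only compiled when sysctl support is enabled. */
static int feature_init_sysctl(struct ctx *c)
{
	c->sysctl_registered = 1;
	printf("sysctl tables registered\n");
	return 0;
}

static void feature_cleanup_sysctl(struct ctx *c)
{
	c->sysctl_registered = 0;
	printf("sysctl tables unregistered\n");
}
#else
/* Stubs: keep callers free of #ifdefs when CONFIG_SYSCTL is undefined. */
static int feature_init_sysctl(struct ctx *c) { (void)c; return 0; }
static void feature_cleanup_sysctl(struct ctx *c) { (void)c; }
#endif

int main(void)
{
	struct ctx c = { 0 };

	if (feature_init_sysctl(&c))
		return 1;
	feature_cleanup_sysctl(&c);
	return 0;
}

Built with -DCONFIG_SYSCTL the real helpers run; built without it the stubs do, and main() is identical in both cases. That is the property the patch gives __ip_vs_control_init() and __ip_vs_control_cleanup() by routing every sysctl-dependent step through __ip_vs_control_{init,cleanup}_sysctl().
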
horms committed Mar 15, 2011
1 parent fb1de43 commit 14e4054
Showing 1 changed file with 62 additions and 36 deletions.
net/netfilter/ipvs/ip_vs_ctl.c: 98 changes (62 additions, 36 deletions)
@@ -88,6 +88,8 @@ static int __ip_vs_addr_is_local_v6(struct net *net,
return 0;
}
#endif

#ifdef CONFIG_SYSCTL
/*
* update_defense_level is called from keventd and from sysctl,
* so it needs to protect itself from softirqs
@@ -229,6 +231,7 @@ static void defense_work_handler(struct work_struct *work)
ip_vs_random_dropentry(ipvs->net);
schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD);
}
#endif

int
ip_vs_use_count_inc(void)
@@ -1511,7 +1514,7 @@ static int ip_vs_zero_all(struct net *net)
return 0;
}


#ifdef CONFIG_SYSCTL
static int
proc_do_defense_mode(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -1533,7 +1536,6 @@ proc_do_defense_mode(ctl_table *table, int write,
return rc;
}


static int
proc_do_sync_threshold(ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos)
@@ -1767,6 +1769,7 @@ const struct ctl_path net_vs_ctl_path[] = {
{ }
};
EXPORT_SYMBOL_GPL(net_vs_ctl_path);
#endif

#ifdef CONFIG_PROC_FS

@@ -3511,7 +3514,8 @@ static void ip_vs_genl_unregister(void)
/*
* per netns init/exit func.
*/
int __net_init __ip_vs_control_init(struct net *net)
#ifdef CONFIG_SYSCTL
int __net_init __ip_vs_control_init_sysctl(struct net *net)
{
int idx;
struct netns_ipvs *ipvs = net_ipvs(net);
@@ -3521,33 +3525,11 @@ int __net_init __ip_vs_control_init(struct net *net)
spin_lock_init(&ipvs->dropentry_lock);
spin_lock_init(&ipvs->droppacket_lock);
spin_lock_init(&ipvs->securetcp_lock);
ipvs->rs_lock = __RW_LOCK_UNLOCKED(ipvs->rs_lock);

/* Initialize rs_table */
for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++)
INIT_LIST_HEAD(&ipvs->rs_table[idx]);

INIT_LIST_HEAD(&ipvs->dest_trash);
atomic_set(&ipvs->ftpsvc_counter, 0);
atomic_set(&ipvs->nullsvc_counter, 0);

/* procfs stats */
ipvs->tot_stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
if (!ipvs->tot_stats.cpustats) {
pr_err("%s() alloc_percpu failed\n", __func__);
goto err_alloc;
}
spin_lock_init(&ipvs->tot_stats.lock);

proc_net_fops_create(net, "ip_vs", 0, &ip_vs_info_fops);
proc_net_fops_create(net, "ip_vs_stats", 0, &ip_vs_stats_fops);
proc_net_fops_create(net, "ip_vs_stats_percpu", 0,
&ip_vs_stats_percpu_fops);

if (!net_eq(net, &init_net)) {
tbl = kmemdup(vs_vars, sizeof(vs_vars), GFP_KERNEL);
if (tbl == NULL)
goto err_dup;
return -ENOMEM;
} else
tbl = vs_vars;
/* Initialize sysctl defaults */
@@ -3576,25 +3558,73 @@ int __net_init __ip_vs_control_init(struct net *net)
tbl[idx++].data = &ipvs->sysctl_nat_icmp_send;


#ifdef CONFIG_SYSCTL
ipvs->sysctl_hdr = register_net_sysctl_table(net, net_vs_ctl_path,
tbl);
if (ipvs->sysctl_hdr == NULL) {
if (!net_eq(net, &init_net))
kfree(tbl);
goto err_dup;
return -ENOMEM;
}
#endif
ip_vs_start_estimator(net, &ipvs->tot_stats);
ipvs->sysctl_tbl = tbl;
/* Schedule defense work */
INIT_DELAYED_WORK(&ipvs->defense_work, defense_work_handler);
schedule_delayed_work(&ipvs->defense_work, DEFENSE_TIMER_PERIOD);

return 0;
}

void __net_init __ip_vs_control_cleanup_sysctl(struct net *net)
{
struct netns_ipvs *ipvs = net_ipvs(net);

cancel_delayed_work_sync(&ipvs->defense_work);
cancel_work_sync(&ipvs->defense_work.work);
unregister_net_sysctl_table(ipvs->sysctl_hdr);
}

err_dup:
#else

int __net_init __ip_vs_control_init_sysctl(struct net *net) { return 0; }
void __net_init __ip_vs_control_cleanup_sysctl(struct net *net) { }

#endif

int __net_init __ip_vs_control_init(struct net *net)
{
int idx;
struct netns_ipvs *ipvs = net_ipvs(net);

ipvs->rs_lock = __RW_LOCK_UNLOCKED(ipvs->rs_lock);

/* Initialize rs_table */
for (idx = 0; idx < IP_VS_RTAB_SIZE; idx++)
INIT_LIST_HEAD(&ipvs->rs_table[idx]);

INIT_LIST_HEAD(&ipvs->dest_trash);
atomic_set(&ipvs->ftpsvc_counter, 0);
atomic_set(&ipvs->nullsvc_counter, 0);

/* procfs stats */
ipvs->tot_stats.cpustats = alloc_percpu(struct ip_vs_cpu_stats);
if (!ipvs->tot_stats.cpustats) {
pr_err("%s(): alloc_percpu.\n", __func__);
return -ENOMEM;
}
spin_lock_init(&ipvs->tot_stats.lock);

proc_net_fops_create(net, "ip_vs", 0, &ip_vs_info_fops);
proc_net_fops_create(net, "ip_vs_stats", 0, &ip_vs_stats_fops);
proc_net_fops_create(net, "ip_vs_stats_percpu", 0,
&ip_vs_stats_percpu_fops);

if (__ip_vs_control_init_sysctl(net))
goto err;

return 0;

err:
free_percpu(ipvs->tot_stats.cpustats);
err_alloc:
return -ENOMEM;
}

@@ -3604,11 +3634,7 @@ static void __net_exit __ip_vs_control_cleanup(struct net *net)

ip_vs_trash_cleanup(net);
ip_vs_stop_estimator(net, &ipvs->tot_stats);
cancel_delayed_work_sync(&ipvs->defense_work);
cancel_work_sync(&ipvs->defense_work.work);
#ifdef CONFIG_SYSCTL
unregister_net_sysctl_table(ipvs->sysctl_hdr);
#endif
__ip_vs_control_cleanup_sysctl(net);
proc_net_remove(net, "ip_vs_stats_percpu");
proc_net_remove(net, "ip_vs_stats");
proc_net_remove(net, "ip_vs");
