diff --git a/causalml/inference/meta/rlearner.py b/causalml/inference/meta/rlearner.py index d61b5962..2859f0cd 100644 --- a/causalml/inference/meta/rlearner.py +++ b/causalml/inference/meta/rlearner.py @@ -1,13 +1,14 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function +from future.builtins import super from copy import deepcopy import logging import pandas as pd import numpy as np from scipy.stats import norm from sklearn.model_selection import cross_val_predict, KFold - +from .utils import check_control_in_treatment, check_p_conditions logger = logging.getLogger('causalml') @@ -57,22 +58,73 @@ def __init__(self, self.cv = KFold(n_splits=n_fold, shuffle=True, random_state=random_state) - self.t_var = 0.0 - self.c_var = 0.0 - def __repr__(self): return ('{}(model_mu={},\n' '\tmodel_tau={})'.format(self.__class__.__name__, self.model_mu.__repr__(), self.model_tau.__repr__())) + def fit(self, X, p, treatment, y, verbose=True): + """Fit the treatment effect and outcome models of the R learner. + + Args: + X (np.matrix): a feature matrix + p (np.ndarray or dict): an array of propensity scores of float (0,1) in the single-treatment case + or, a dictionary of treatment groups that map to propensity vectors of float (0,1) + treatment (np.array): a treatment vector + y (np.array): an outcome vector + """ + check_control_in_treatment(treatment, self.control_name) + self.t_groups = np.unique(treatment[treatment != self.control_name]) + self.t_groups.sort() + check_p_conditions(p, self.t_groups) + if isinstance(p, np.ndarray): + treatment_name = self.t_groups[0] + p = {treatment_name: p} + + self._classes = {group: i for i, group in enumerate(self.t_groups)} + self.models_tau = {group: deepcopy(self.model_tau) for group in self.t_groups} + self.vars_c = {} + self.vars_t = {} + + if verbose: + logger.info('generating out-of-fold CV outcome estimates') + yhat = cross_val_predict(self.model_mu, X, y, cv=self.cv) + + for group in self.t_groups: + w = (treatment == group).astype(int) + + if verbose: + logger.info('training the treatment effect model for {} with R-loss'.format(group)) + self.models_tau[group].fit(X, (y - yhat) / (w - p[group]), sample_weight=(w - p[group]) ** 2) + + self.vars_c[group] = (y[w == 0] - yhat[w == 0]).var() + self.vars_t[group] = (y[w == 1] - yhat[w == 1]).var() + + def predict(self, X): + """Predict treatment effects. + + Args: + X (np.matrix): a feature matrix + + Returns: + (numpy.ndarray): Predictions of treatment effects. + """ + te = np.zeros((X.shape[0], self.t_groups.shape[0])) + for i, group in enumerate(self.t_groups): + dhat = self.models_tau[group].predict(X) + te[:, i] = dhat + + return te + def fit_predict(self, X, p, treatment, y, return_ci=False, - n_bootstraps=1000, bootstrap_size=10000, verbose=False): + n_bootstraps=1000, bootstrap_size=10000, verbose=True): """Fit the treatment effect and outcome models of the R learner and predict treatment effects. 
Args: X (np.matrix): a feature matrix - p (np.array): a propensity vector between 0 and 1 treatment (np.array): a treatment vector + p (np.ndarray or dict): an array of propensity scores of float (0,1) in the single-treatment case + or, a dictionary of treatment groups that map to propensity vectors of float (0,1) y (np.array): an outcome vector return_ci (bool): whether to return confidence intervals n_bootstraps (int): number of bootstrap iterations @@ -87,21 +139,36 @@ def fit_predict(self, X, p, treatment, y, return_ci=False, self.fit(X, p, treatment, y) te = self.predict(X) + check_p_conditions(p, self.t_groups) + if isinstance(p, np.ndarray): + treatment_name = self.t_groups[0] + p = {treatment_name: p} + if not return_ci: return te else: start = pd.datetime.today() - te_bootstraps = np.zeros(shape=(X.shape[0], n_bootstraps)) + self.t_groups_global = self.t_groups + self._classes_global = self._classes + self.model_mu_global = deepcopy(self.model_mu) + self.models_tau_global = deepcopy(self.models_tau) + te_bootstraps = np.zeros(shape=(X.shape[0], self.t_groups.shape[0], n_bootstraps)) for i in range(n_bootstraps): te_b = self.bootstrap(X, p, treatment, y, size=bootstrap_size) - te_bootstraps[:, i] = np.ravel(te_b) - if verbose: + te_bootstraps[:, :, i] = te_b + if verbose and i % 10 == 0 and i > 0: now = pd.datetime.today() - lapsed = (now - start).seconds / 60 - logger.info('{}/{} bootstraps completed. ({:.01f} min ' 'lapsed)'.format(i + 1, n_bootstraps, lapsed)) + lapsed = (now-start).seconds + logger.info('{}/{} bootstraps completed. ({}s lapsed)'.format(i, n_bootstraps, lapsed)) - te_lower = np.percentile(te_bootstraps, (self.ate_alpha / 2) * 100, axis=1) - te_upper = np.percentile(te_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=1) + te_lower = np.percentile(te_bootstraps, (self.ate_alpha / 2) * 100, axis=2) + te_upper = np.percentile(te_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=2) + + # set member variables back to global (currently last bootstrapped outcome) + self.t_groups = self.t_groups_global + self._classes = self._classes_global + self.model_mu = self.model_mu_global + self.models_tau = self.models_tau_global return (te, te_lower, te_upper) @@ -110,75 +177,53 @@ def estimate_ate(self, X, p, treatment, y): Args: X (np.matrix): a feature matrix - p (np.array): a propensity vector between 0 and 1 + p (np.ndarray or dict): an array of propensity scores of float (0,1) in the single-treatment case + or, a dictionary of treatment groups that map to propensity vectors of float (0,1) treatment (np.array): a treatment vector y (np.array): an outcome vector Returns: The mean and confidence interval (LB, UB) of the ATE estimate. 
""" - dhat = self.fit_predict(X, p, treatment, y) + te = self.fit_predict(X, p, treatment, y) - te = dhat.mean() - prob_treatment = float(sum(treatment != self.control_name)) / X.shape[0] + check_p_conditions(p, self.t_groups) + if isinstance(p, np.ndarray): + treatment_name = self.t_groups[0] + p = {treatment_name: p} - se = np.sqrt(self.t_var / prob_treatment + self.c_var / (1 - prob_treatment) + dhat.var()) / X.shape[0] + ate = np.zeros(self.t_groups.shape[0]) + ate_lb = np.zeros(self.t_groups.shape[0]) + ate_ub = np.zeros(self.t_groups.shape[0]) - te_lb = te - se * norm.ppf(1 - self.ate_alpha / 2) - te_ub = te + se * norm.ppf(1 - self.ate_alpha / 2) + for i, group in enumerate(self.t_groups): + w = (treatment == group).astype(int) + prob_treatment = float(sum(w)) / X.shape[0] + _ate = te[:, i].mean() - return te, te_lb, te_ub + se = (np.sqrt((self.vars_t[group] / prob_treatment) + + (self.vars_c[group] / (1 - prob_treatment)) + + te[:, i].var()) + / X.shape[0]) - def fit(self, X, p, treatment, y): - """Fit the treatment effect and outcome models of the R learner. + _ate_lb = _ate - se * norm.ppf(1 - self.ate_alpha / 2) + _ate_ub = _ate + se * norm.ppf(1 - self.ate_alpha / 2) - Args: - X (np.matrix): a feature matrix - p (np.array): a propensity vector between 0 and 1 - treatment (np.array): a treatment vector - y (np.array): an outcome vector - """ - is_treatment = treatment != self.control_name - w = is_treatment.astype(int) - - t_groups = np.unique(treatment[is_treatment]) - self._classes = {} - # this should be updated for multi-treatment case - self._classes[t_groups[0]] = 0 - - logger.info('generating out-of-fold CV outcome estimates with {}'.format(self.model_mu)) - - yhat = cross_val_predict(self.model_mu, X, y, cv=self.cv) - - logger.info('training the treatment effect model, {} with R-loss'.format(self.model_tau)) - self.model_tau.fit(X, (y - yhat) / (w - p), sample_weight=(w - p) ** 2) - - self.t_var = (y[w == 1] - yhat[w == 1]).var() - self.c_var = (y[w == 0] - yhat[w == 0]).var() - - def predict(self, X): - """Predict treatment effects. - - Args: - X (np.matrix): a feature matrix - - Returns: - (numpy.ndarray): Predictions of treatment effects. - """ - - dhat = self.model_tau.predict(X) + ate[i] = _ate + ate_lb[i] = _ate_lb + ate_ub[i] = _ate_ub - return dhat.reshape(-1, 1) + return ate, ate_lb, ate_ub def bootstrap(self, X, p, treatment, y, size=10000): """Runs a single bootstrap. 
Fits on bootstrapped sample, then predicts on whole population.""" idxs = np.random.choice(np.arange(0, X.shape[0]), size=size) X_b = X[idxs] - p_b = p[idxs] + p_b = {group: _p[idxs] for group, _p in p.items()} treatment_b = treatment[idxs] y_b = y[idxs] - self.fit(X=X_b, p=p_b, treatment=treatment_b, y=y_b) + self.fit(X=X_b, p=p_b, treatment=treatment_b, y=y_b, verbose=False) te_b = self.predict(X=X) return te_b @@ -208,7 +253,7 @@ def __init__(self, n_fold (int, optional): the number of cross validation folds for outcome_learner random_state (int or RandomState, optional): a seed (int) or random number generater (RandomState) """ - super(BaseRRegressor, self).__init__( + super().__init__( learner=learner, outcome_learner=outcome_learner, effect_learner=effect_learner, @@ -244,7 +289,7 @@ def __init__(self, n_fold (int, optional): the number of cross validation folds for outcome_learner random_state (int or RandomState, optional): a seed (int) or random number generater (RandomState) """ - super(BaseRClassifier, self).__init__( + super().__init__( learner=learner, outcome_learner=outcome_learner, effect_learner=effect_learner, @@ -256,30 +301,55 @@ def __init__(self, if (outcome_learner is None) and (effect_learner is None): raise ValueError("Either the outcome learner or the effect learner must be specified.") - def fit(self, X, p, treatment, y): + def fit(self, X, p, treatment, y, verbose=True): """Fit the treatment effect and outcome models of the R learner. Args: X (np.matrix): a feature matrix - p (np.array): a propensity vector between 0 and 1 + p (np.ndarray or dict): an array of propensity scores of float (0,1) in the single-treatment case + or, a dictionary of treatment groups that map to propensity vectors of float (0,1) treatment (np.array): a treatment vector y (np.array): an outcome vector """ - is_treatment = treatment != self.control_name - w = is_treatment.astype(int) + check_control_in_treatment(treatment, self.control_name) + self.t_groups = np.unique(treatment[treatment != self.control_name]) + self.t_groups.sort() + check_p_conditions(p, self.t_groups) + if isinstance(p, np.ndarray): + treatment_name = self.t_groups[0] + p = {treatment_name: p} - t_groups = np.unique(treatment[is_treatment]) - self._classes = {} - # this should be updated for multi-treatment case - self._classes[t_groups[0]] = 0 + self._classes = {group: i for i, group in enumerate(self.t_groups)} + self.models_tau = {group: deepcopy(self.model_tau) for group in self.t_groups} + self.vars_c = {} + self.vars_t = {} - logger.info('generating out-of-fold CV outcome estimates with {}'.format(self.model_mu)) + if verbose: + logger.info('generating out-of-fold CV outcome estimates') + yhat = cross_val_predict(self.model_mu, X, y, cv=self.cv, method='predict_proba')[:, 1] - yhat = cross_val_predict(self.model_mu, X, y, cv=self.cv, - method='predict_proba')[:, 1] + for group in self.t_groups: + w = (treatment == group).astype(int) - logger.info('training the treatment effect model, {} with R-loss'.format(self.model_tau)) - self.model_tau.fit(X, (y - yhat) / (w - p), sample_weight=(w - p) ** 2) + if verbose: + logger.info('training the treatment effect model for {} with R-loss'.format(group)) + self.models_tau[group].fit(X, (y - yhat) / (w - p[group]), sample_weight=(w - p[group]) ** 2) + + self.vars_c[group] = (y[w == 0] - yhat[w == 0]).var() + self.vars_t[group] = (y[w == 1] - yhat[w == 1]).var() + + def predict(self, X): + """Predict treatment effects. 
+ + Args: + X (np.matrix): a feature matrix + + Returns: + (numpy.ndarray): Predictions of treatment effects. + """ + te = np.zeros((X.shape[0], self.t_groups.shape[0])) + for i, group in enumerate(self.t_groups): + dhat = self.models_tau[group].predict(X) + te[:, i] = dhat - self.t_var = (y[w == 1] - yhat[w == 1]).var() - self.c_var = (y[w == 0] - yhat[w == 0]).var() + return te diff --git a/causalml/inference/meta/slearner.py b/causalml/inference/meta/slearner.py index aae41938..33fc6452 100644 --- a/causalml/inference/meta/slearner.py +++ b/causalml/inference/meta/slearner.py @@ -1,6 +1,7 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function +from future.builtins import super import logging import pandas as pd import numpy as np @@ -8,7 +9,7 @@ from sklearn.metrics import mean_squared_error as mse from sklearn.metrics import mean_absolute_error as mae import statsmodels.api as sm - +from copy import deepcopy logger = logging.getLogger('causalml') @@ -73,18 +74,17 @@ def fit(self, X, treatment, y): treatment (np.array): a treatment vector y (np.array): an outcome vector """ - is_treatment = treatment != self.control_name - w = is_treatment.astype(int) - - t_groups = np.unique(treatment[is_treatment]) - self._classes = {} + self.t_groups = np.unique(treatment[treatment != self.control_name]) + self.t_groups.sort() + self._classes = {group: i for i, group in enumerate(self.t_groups)} + self.models = {group: deepcopy(self.model) for group in self.t_groups} - # this should be updated for multi-treatment case - self._classes[t_groups[0]] = 0 - X = np.hstack((w.reshape((-1, 1)), X)) - self.model.fit(X, y) + for group in self.t_groups: + w = (treatment == group).astype(int) + X_new = np.hstack((w.reshape((-1, 1)), X)) + self.models[group].fit(X_new, y) - def predict(self, X, treatment, y=None): + def predict(self, X, treatment, y=None, verbose=True): """Predict treatment effects. Args: X (np.matrix): a feature matrix @@ -93,34 +93,42 @@ def predict(self, X, treatment, y=None): Returns: (numpy.ndarray): Predictions of treatment effects. 
""" - is_treatment = treatment != self.control_name - w = is_treatment.astype(int) - - X = np.hstack((w.reshape((-1, 1)), X)) - - X[:, 0] = 0 # set the treatment column to zero (the control group) - yhat_c = self.model.predict(X) - - X[:, 0] = 1 # set the treatment column to one (the treatment group) - yhat_t = self.model.predict(X) - - if y is not None: - logger.info('RMSE (Control): {:.6f}'.format( - np.sqrt(mse(y[~is_treatment], yhat_c[~is_treatment]))) - ) - logger.info(' MAE (Control): {:.6f}'.format( - mae(y[~is_treatment], yhat_c[~is_treatment])) - ) - logger.info('RMSE (Treatment): {:.6f}'.format( - np.sqrt(mse(y[is_treatment], yhat_t[is_treatment]))) - ) - logger.info(' MAE (Treatment): {:.6f}'.format( - mae(y[is_treatment], yhat_t[is_treatment])) - ) - - return (yhat_t - yhat_c).reshape(-1, 1) - - def fit_predict(self, X, treatment, y, return_ci=False, n_bootstraps=1000, bootstrap_size=10000, verbose=False): + yhat_cs = {} + yhat_ts = {} + + for group in self.t_groups: + w = (treatment != group).astype(int) + model = self.models[group] + X_new = np.hstack((w.reshape((-1, 1)), X)) + + X_new[:, 0] = 0 # set the treatment column to zero (the control group) + yhat_cs[group] = model.predict(X_new) + X_new[:, 0] = 1 # set the treatment column to one (the treatment group) + yhat_ts[group] = model.predict(X_new) + + if y is not None and verbose: + for group in self.t_groups: + logger.info('Error metrics for {}'.format(group)) + logger.info('RMSE (Control): {:.6f}'.format( + np.sqrt(mse(y[treatment != group], yhat_cs[group][treatment != group]))) + ) + logger.info(' MAE (Control): {:.6f}'.format( + mae(y[treatment != group], yhat_cs[group][treatment != group])) + ) + logger.info('RMSE (Treatment): {:.6f}'.format( + np.sqrt(mse(y[treatment == group], yhat_ts[group][treatment == group]))) + ) + logger.info(' MAE (Treatment): {:.6f}'.format( + mae(y[treatment == group], yhat_ts[group][treatment == group])) + ) + + te = np.zeros((X.shape[0], self.t_groups.shape[0])) + for i, group in enumerate(self.t_groups): + te[:, i] = yhat_ts[group] - yhat_cs[group] + + return te + + def fit_predict(self, X, treatment, y, return_ci=False, n_bootstraps=1000, bootstrap_size=10000, verbose=True): """Fit the inference model of the S learner and predict treatment effects. Args: X (np.matrix): a feature matrix @@ -142,23 +150,43 @@ def fit_predict(self, X, treatment, y, return_ci=False, n_bootstraps=1000, boots return te else: start = pd.datetime.today() - te_bootstraps = np.zeros(shape=(X.shape[0], n_bootstraps)) + self.t_groups_global = self.t_groups + self._classes_global = self._classes + self.models_global = deepcopy(self.models) + te_bootstraps = np.zeros(shape=(X.shape[0], self.t_groups.shape[0], n_bootstraps)) for i in range(n_bootstraps): te_b = self.bootstrap(X, treatment, y, size=bootstrap_size) - te_bootstraps[:, i] = np.ravel(te_b) - if verbose: + te_bootstraps[:, :, i] = te_b + if verbose and i % 10 == 0 and i > 0: now = pd.datetime.today() - lapsed = (now - start).seconds / 60 - logger.info('{}/{} bootstraps completed. ({:.01f} min lapsed)'.format(i + 1, n_bootstraps, lapsed)) + lapsed = (now-start).seconds + logger.info('{}/{} bootstraps completed. 
({}s lapsed)'.format(i+1, n_bootstraps, lapsed)) - te_lower = np.percentile(te_bootstraps, (self.ate_alpha / 2) * 100, axis=1) - te_upper = np.percentile(te_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=1) + te_lower = np.percentile(te_bootstraps, (self.ate_alpha/2)*100, axis=2) + te_upper = np.percentile(te_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=2) + + # set member variables back to global (currently last bootstrapped outcome) + self.t_groups = self.t_groups_global + self._classes = self._classes_global + self.models = self.models_global return (te, te_lower, te_upper) - def estimate_ate(self, X, treatment, y): - te, te_lb, te_ub = self.fit_predict(X, treatment, y, return_ci=True) - return te.mean(), te_lb.mean(), te_ub.mean() + def estimate_ate(self, X, treatment, y, return_ci=False, n_bootstraps=1000, bootstrap_size=10000, verbose=True): + if return_ci: + te, te_lb, te_ub = self.fit_predict(X, treatment, y, return_ci=True, n_bootstraps=n_bootstraps, + bootstrap_size=bootstrap_size, verbose=verbose) + + ate = te.mean(axis=0) + ate_lb = te_lb.mean(axis=0) + ate_ub = te_ub.mean(axis=0) + return ate, ate_lb, ate_ub + + else: + te = self.fit_predict(X, treatment, y, return_ci=False, n_bootstraps=n_bootstraps, + bootstrap_size=bootstrap_size, verbose=verbose) + ate = te.mean(axis=0) + return ate def bootstrap(self, X, treatment, y, size=10000): """Runs a single bootstrap. Fits on bootstrapped sample, then predicts on whole population. @@ -168,7 +196,7 @@ def bootstrap(self, X, treatment, y, size=10000): treatment_b = treatment[idxs] y_b = y[idxs] self.fit(X=X_b, treatment=treatment_b, y=y_b) - te_b = self.predict(X=X, treatment=treatment, y=y) + te_b = self.predict(X=X, treatment=treatment, verbose=False) return te_b @@ -183,7 +211,7 @@ def __init__(self, learner=None, ate_alpha=0.05, control_name=0): learner (optional): a model to estimate the treatment effect control_name (str or int, optional): name of control group """ - super(BaseSRegressor, self).__init__( + super().__init__( learner=learner, ate_alpha=ate_alpha, control_name=control_name) @@ -201,12 +229,12 @@ def __init__(self, learner=None, ate_alpha=0.05, control_name=0): Should have a predict_proba() method. control_name (str or int, optional): name of control group """ - super(BaseSClassifier, self).__init__( + super().__init__( learner=learner, ate_alpha=ate_alpha, control_name=control_name) - def predict(self, X, treatment, y=None): + def predict(self, X, treatment, y=None, verbose=True): """Predict treatment effects. Args: X (np.matrix): a feature matrix @@ -215,32 +243,40 @@ def predict(self, X, treatment, y=None): Returns: (numpy.ndarray): Predictions of treatment effects. 
""" - is_treatment = treatment != self.control_name - w = is_treatment.astype(int) - - X = np.hstack((w.reshape((-1, 1)), X)) - - X[:, 0] = 0 # set the treatment column to zero (the control group) - yhat_c = self.model.predict_proba(X)[:, 1] - - X[:, 0] = 1 # set the treatment column to one (the treatment group) - yhat_t = self.model.predict_proba(X)[:, 1] - - if y is not None: - logger.info('RMSE (Control): {:.6f}'.format( - np.sqrt(mse(y[~is_treatment], yhat_c[~is_treatment]))) - ) - logger.info(' MAE (Control): {:.6f}'.format( - mae(y[~is_treatment], yhat_c[~is_treatment])) - ) - logger.info('RMSE (Treatment): {:.6f}'.format( - np.sqrt(mse(y[is_treatment], yhat_t[is_treatment]))) - ) - logger.info(' MAE (Treatment): {:.6f}'.format( - mae(y[is_treatment], yhat_t[is_treatment])) - ) - - return (yhat_t - yhat_c).reshape(-1, 1) + yhat_cs = {} + yhat_ts = {} + + for group in self.t_groups: + w = (treatment != group).astype(int) + model = self.models[group] + X_new = np.hstack((w.reshape((-1, 1)), X)) + + X_new[:, 0] = 0 # set the treatment column to zero (the control group) + yhat_cs[group] = model.predict_proba(X_new)[:, 1] + X_new[:, 0] = 1 # set the treatment column to one (the treatment group) + yhat_ts[group] = model.predict_proba(X_new)[:, 1] + + if y is not None and verbose: + for group in self.t_groups: + logger.info('Error metrics for {}'.format(group)) + logger.info('RMSE (Control): {:.6f}'.format( + np.sqrt(mse(y[treatment != group], yhat_cs[group][treatment != group]))) + ) + logger.info(' MAE (Control): {:.6f}'.format( + mae(y[treatment != group], yhat_cs[group][treatment != group])) + ) + logger.info('RMSE (Treatment): {:.6f}'.format( + np.sqrt(mse(y[treatment == group], yhat_ts[group][treatment == group]))) + ) + logger.info(' MAE (Treatment): {:.6f}'.format( + mae(y[treatment == group], yhat_ts[group][treatment == group])) + ) + + te = np.zeros((X.shape[0], self.t_groups.shape[0])) + for i, group in enumerate(self.t_groups): + te[:, i] = yhat_ts[group] - yhat_cs[group] + + return te class LRSRegressor(BaseSRegressor): @@ -250,7 +286,7 @@ def __init__(self, ate_alpha=.05, control_name=0): ate_alpha (float, optional): the confidence level alpha of the ATE estimate control_name (str or int, optional): name of control group """ - super(LRSRegressor, self).__init__(StatsmodelsOLS(alpha=ate_alpha), ate_alpha, control_name) + super().__init__(StatsmodelsOLS(alpha=ate_alpha), ate_alpha, control_name) def estimate_ate(self, X, treatment, y): """Estimate the Average Treatment Effect (ATE). @@ -261,11 +297,15 @@ def estimate_ate(self, X, treatment, y): Returns: The mean and confidence interval (LB, UB) of the ATE estimate. 
""" - is_treatment = treatment != self.control_name - w = is_treatment.astype(int) - - self.fit(X, w, y) - te = self.model.coefficients[0] - te_lb = self.model.conf_ints[0, 0] - te_ub = self.model.conf_ints[0, 1] - return te, te_lb, te_ub + self.fit(X, treatment, y) + + ate = np.zeros(self.t_groups.shape[0]) + ate_lb = np.zeros(self.t_groups.shape[0]) + ate_ub = np.zeros(self.t_groups.shape[0]) + + for i, group in enumerate(self.t_groups): + ate[i] = self.models[group].coefficients[0] + ate_lb[i] = self.models[group].conf_ints[0, 0] + ate_ub[i] = self.models[group].conf_ints[0, 1] + + return ate, ate_lb, ate_ub \ No newline at end of file diff --git a/causalml/inference/meta/tlearner.py b/causalml/inference/meta/tlearner.py index c63cc920..027a7439 100644 --- a/causalml/inference/meta/tlearner.py +++ b/causalml/inference/meta/tlearner.py @@ -1,6 +1,7 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function +from future.builtins import super from copy import deepcopy import logging import pandas as pd @@ -64,20 +65,19 @@ def fit(self, X, treatment, y): treatment (np.array): a treatment vector y (np.array): an outcome vector """ - is_treatment = treatment != self.control_name - - t_groups = np.unique(treatment[is_treatment]) - self._classes = {} - # this should be updated for multi-treatment case - self._classes[t_groups[0]] = 0 - - logger.info('Training a control group model') - self.model_c.fit(X[~is_treatment], y[~is_treatment]) - - logger.info('Training a treatment group model') - self.model_t.fit(X[is_treatment], y[is_treatment]) - - def predict(self, X, treatment=None, y=None): + self.t_groups = np.unique(treatment[treatment != self.control_name]) + self.t_groups.sort() + self._classes = {group: i for i, group in enumerate(self.t_groups)} + self.models_c = {group: deepcopy(self.model_c) for group in self.t_groups} + self.models_t = {group: deepcopy(self.model_t) for group in self.t_groups} + + for group in self.t_groups: + w = (treatment == group).astype(int) + X_new = np.hstack((w.reshape((-1, 1)), X)) + self.models_c[group].fit(X_new[w == 0], y[w == 0]) + self.models_t[group].fit(X_new[w == 1], y[w == 1]) + + def predict(self, X, treatment=None, y=None, return_components=False, verbose=True): """Predict treatment effects. Args: @@ -88,19 +88,45 @@ def predict(self, X, treatment=None, y=None): Returns: (numpy.ndarray): Predictions of treatment effects. 
""" - yhat_c = self.model_c.predict(X) - yhat_t = self.model_t.predict(X) - - if (y is not None) and (treatment is not None): - is_treatment = treatment != self.control_name - logger.info('RMSE (Control): {:.6f}'.format(np.sqrt(mse(y[~is_treatment], yhat_c[~is_treatment])))) - logger.info(' MAE (Control): {:.6f}'.format(mae(y[~is_treatment], yhat_c[~is_treatment]))) - logger.info('RMSE (Treatment): {:.6f}'.format(np.sqrt(mse(y[is_treatment], yhat_t[is_treatment])))) - logger.info(' MAE (Treatment): {:.6f}'.format(mae(y[is_treatment], yhat_t[is_treatment]))) - - return (yhat_t - yhat_c).reshape(-1, 1) + yhat_cs = {} + yhat_ts = {} + + for group in self.t_groups: + w = (treatment != group).astype(int) + X_new = np.hstack((w.reshape((-1, 1)), X)) + + model_c = self.models_c[group] + model_t = self.models_t[group] + yhat_cs[group] = model_c.predict(X_new) + yhat_ts[group] = model_t.predict(X_new) + + if (y is not None) and (treatment is not None) and verbose: + for group in self.t_groups: + logger.info('Error metrics for {}'.format(group)) + logger.info('RMSE (Control): {:.6f}'.format( + np.sqrt(mse(y[treatment != group], yhat_cs[group][treatment != group]))) + ) + logger.info(' MAE (Control): {:.6f}'.format( + mae(y[treatment != group], yhat_cs[group][treatment != group])) + ) + logger.info('RMSE (Treatment): {:.6f}'.format( + np.sqrt(mse(y[treatment == group], yhat_ts[group][treatment == group]))) + ) + logger.info(' MAE (Treatment): {:.6f}'.format( + mae(y[treatment == group], yhat_ts[group][treatment == group])) + ) + + te = np.zeros((X.shape[0], self.t_groups.shape[0])) + for i, group in enumerate(self.t_groups): + te[:, i] = yhat_ts[group] - yhat_cs[group] + + if not return_components: + return te + else: + return te, yhat_cs, yhat_ts - def fit_predict(self, X, treatment, y, return_ci=False, n_bootstraps=1000, bootstrap_size=10000, verbose=False): + def fit_predict(self, X, treatment, y, return_ci=False, n_bootstraps=1000, bootstrap_size=10000, + return_components=False, verbose=True): """Fit the inference model of the T learner and predict treatment effects. Args: @@ -118,23 +144,33 @@ def fit_predict(self, X, treatment, y, return_ci=False, n_bootstraps=1000, boots UB [n_samples, n_treatment] """ self.fit(X, treatment, y) - te = self.predict(X, treatment, y) + te = self.predict(X, treatment, y, return_components=return_components) if not return_ci: return te else: start = pd.datetime.today() - te_bootstraps = np.zeros(shape=(X.shape[0], n_bootstraps)) + self.t_groups_global = self.t_groups + self._classes_global = self._classes + self.models_c_global = deepcopy(self.models_c) + self.models_t_global = deepcopy(self.models_t) + te_bootstraps = np.zeros(shape=(X.shape[0], self.t_groups.shape[0], n_bootstraps)) for i in range(n_bootstraps): te_b = self.bootstrap(X, treatment, y, size=bootstrap_size) - te_bootstraps[:, i] = np.ravel(te_b) - if verbose: + te_bootstraps[:, :, i] = te_b + if verbose and i % 10 == 0 and i > 0: now = pd.datetime.today() - lapsed = (now - start).seconds / 60 - logger.info('{}/{} bootstraps completed. ({:.01f} min lapsed)'.format(i + 1, n_bootstraps, lapsed)) + lapsed = (now-start).seconds + logger.info('{}/{} bootstraps completed. 
({}s lapsed)'.format(i, n_bootstraps, lapsed)) - te_lower = np.percentile(te_bootstraps, (self.ate_alpha / 2) * 100, axis=1) - te_upper = np.percentile(te_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=1) + te_lower = np.percentile(te_bootstraps, (self.ate_alpha/2)*100, axis=2) + te_upper = np.percentile(te_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=2) + + # set member variables back to global (currently last bootstrapped outcome) + self.t_groups = self.t_groups_global + self._classes = self._classes_global + self.models_c = self.models_c_global + self.models_t = self.models_t_global return (te, te_lower, te_upper) @@ -149,39 +185,45 @@ def estimate_ate(self, X, treatment, y): Returns: The mean and confidence interval (LB, UB) of the ATE estimate. """ - is_treatment = treatment != self.control_name - w = is_treatment.astype(int) + te, yhat_cs, yhat_ts = self.fit_predict(X, treatment, y, return_components=True) - self.fit(X, treatment, y) + ate = np.zeros(self.t_groups.shape[0]) + ate_lb = np.zeros(self.t_groups.shape[0]) + ate_ub = np.zeros(self.t_groups.shape[0]) - yhat_c = self.model_c.predict(X) - yhat_t = self.model_t.predict(X) + for i, group in enumerate(self.t_groups): + yhat_c = yhat_cs[group] + yhat_t = yhat_ts[group] + _ate = te[:, i].mean() - te = (yhat_t - yhat_c).mean() - prob_treatment = float(sum(w)) / X.shape[0] + w = (treatment == group).astype(int) + prob_treatment = float(sum(w)) / X.shape[0] - se = np.sqrt(( - (y[~is_treatment] - yhat_c[~is_treatment]).var() - / (1 - prob_treatment) + - (y[is_treatment] - yhat_t[is_treatment]).var() - / prob_treatment + - (yhat_t - yhat_c).var() - ) / y.shape[0]) + se = np.sqrt(( + (y[treatment != group] - yhat_c[treatment != group]).var() + / (1 - prob_treatment) + + (y[treatment == group] - yhat_t[treatment == group]).var() + / prob_treatment + + (yhat_t - yhat_c).var() + ) / y.shape[0]) - te_lb = te - se * norm.ppf(1 - self.ate_alpha / 2) - te_ub = te + se * norm.ppf(1 - self.ate_alpha / 2) + _ate_lb = _ate - se * norm.ppf(1 - self.ate_alpha / 2) + _ate_ub = _ate + se * norm.ppf(1 - self.ate_alpha / 2) - return te, te_lb, te_ub + ate[i] = _ate + ate_lb[i] = _ate_lb + ate_ub[i] = _ate_ub + + return ate, ate_lb, ate_ub def bootstrap(self, X, treatment, y, size=10000): """Runs a single bootstrap. Fits on bootstrapped sample, then predicts on whole population.""" - idxs = np.random.choice(np.arange(0, X.shape[0]), size=size) X_b = X[idxs] treatment_b = treatment[idxs] y_b = y[idxs] self.fit(X=X_b, treatment=treatment_b, y=y_b) - te_b = self.predict(X=X, treatment=treatment, y=y) + te_b = self.predict(X=X, treatment=treatment, verbose=False) return te_b @@ -205,7 +247,7 @@ def __init__(self, ate_alpha (float, optional): the confidence level alpha of the ATE estimate control_name (str or int, optional): name of control group """ - super(BaseTRegressor, self).__init__( + super().__init__( learner=learner, control_learner=control_learner, treatment_learner=treatment_learner, @@ -233,14 +275,14 @@ def __init__(self, ate_alpha (float, optional): the confidence level alpha of the ATE estimate control_name (str or int, optional): name of control group """ - super(BaseTClassifier, self).__init__( + super().__init__( learner=learner, control_learner=control_learner, treatment_learner=treatment_learner, ate_alpha=ate_alpha, control_name=control_name) - def predict(self, X, treatment=None, y=None): + def predict(self, X, treatment=None, y=None, return_components=False, verbose=True): """Predict treatment effects. 
Args: @@ -251,30 +293,55 @@ def predict(self, X, treatment=None, y=None): Returns: (numpy.ndarray): Predictions of treatment effects. """ - yhat_c = self.model_c.predict_proba(X)[:, 1] - yhat_t = self.model_t.predict_proba(X)[:, 1] - - if (y is not None) and (treatment is not None): - is_treatment = treatment != self.control_name - logger.info('RMSE (Control): {:.6f}'.format(np.sqrt(mse(y[~is_treatment], yhat_c[~is_treatment])))) - logger.info(' MAE (Control): {:.6f}'.format(mae(y[~is_treatment], yhat_c[~is_treatment]))) - logger.info('RMSE (Treatment): {:.6f}'.format(np.sqrt(mse(y[is_treatment], yhat_t[is_treatment])))) - logger.info(' MAE (Treatment): {:.6f}'.format(mae(y[is_treatment], yhat_t[is_treatment]))) - - return (yhat_t - yhat_c).reshape(-1, 1) + yhat_cs = {} + yhat_ts = {} + + for group in self.t_groups: + w = (treatment != group).astype(int) + X_new = np.hstack((w.reshape((-1, 1)), X)) + + model_c = self.models_c[group] + model_t = self.models_t[group] + yhat_cs[group] = model_c.predict(X_new) + yhat_ts[group] = model_t.predict(X_new) + + if (y is not None) and (treatment is not None) and verbose: + for group in self.t_groups: + logger.info('Error metrics for {}'.format(group)) + logger.info('RMSE (Control): {:.6f}'.format( + np.sqrt(mse(y[treatment != group], yhat_cs[group][treatment != group]))) + ) + logger.info(' MAE (Control): {:.6f}'.format( + mae(y[treatment != group], yhat_cs[group][treatment != group])) + ) + logger.info('RMSE (Treatment): {:.6f}'.format( + np.sqrt(mse(y[treatment == group], yhat_ts[group][treatment == group]))) + ) + logger.info(' MAE (Treatment): {:.6f}'.format( + mae(y[treatment == group], yhat_ts[group][treatment == group])) + ) + + te = np.zeros((X.shape[0], self.t_groups.shape[0])) + for i, group in enumerate(self.t_groups): + te[:, i] = yhat_ts[group] - yhat_cs[group] + + if not return_components: + return te + else: + return te, yhat_cs, yhat_ts class XGBTRegressor(BaseTRegressor): def __init__(self, ate_alpha=.05, control_name=0, *args, **kwargs): """Initialize a T-learner with two XGBoost models.""" - super(XGBTRegressor, self).__init__(learner=XGBRegressor(*args, **kwargs), - ate_alpha=ate_alpha, - control_name=control_name) + super().__init__(learner=XGBRegressor(*args, **kwargs), + ate_alpha=ate_alpha, + control_name=control_name) class MLPTRegressor(BaseTRegressor): def __init__(self, ate_alpha=.05, control_name=0, *args, **kwargs): """Initialize a T-learner with two MLP models.""" - super(MLPTRegressor, self).__init__(learner=MLPRegressor(*args, **kwargs), - ate_alpha=ate_alpha, - control_name=control_name) + super().__init__(learner=MLPRegressor(*args, **kwargs), + ate_alpha=ate_alpha, + control_name=control_name) diff --git a/causalml/inference/meta/utils.py b/causalml/inference/meta/utils.py new file mode 100644 index 00000000..baa10e8a --- /dev/null +++ b/causalml/inference/meta/utils.py @@ -0,0 +1,15 @@ +import numpy as np + + +def check_control_in_treatment(treatment, control_name): + if np.unique(treatment).shape[0] == 2: + assert control_name in treatment, \ + 'If treatment vector has 2 unique values, one of them must be the control (specify in init step).' + + +def check_p_conditions(p, t_groups): + assert isinstance(p, (np.ndarray, dict)), \ + 'p must be an np.ndarray or dict type' + if isinstance(p, np.ndarray): + assert t_groups.shape[0] == 1, \ + 'If p is passed as an np.ndarray, there must be only 1 unique non-control group in the treatment vector.' 
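The new `utils.py` above codifies the propensity-score convention used throughout this PR: `p` may be a bare array only in the single-treatment case, and otherwise must be a dict keyed by treatment group. As a minimal sketch (not part of the diff), this is how a caller is expected to shape `p` so that `check_p_conditions` passes; the group names and numbers below are illustrative only.

```python
# Hedged sketch of the p-format convention introduced in this PR.
# Group names and propensity values are made up for illustration.
import numpy as np

control_name = 'control'

# Single-treatment case: one non-control group, so a plain array is accepted.
treatment_1 = np.array(['control', 'treatment_a', 'treatment_a', 'control'])
p_1 = np.array([0.45, 0.60, 0.55, 0.40])

t_groups_1 = np.unique(treatment_1[treatment_1 != control_name])
t_groups_1.sort()
assert isinstance(p_1, (np.ndarray, dict))
assert t_groups_1.shape[0] == 1  # what check_p_conditions enforces for arrays

# Internally the learners normalize the array form into the dict form:
p_1 = {t_groups_1[0]: p_1}

# Multi-treatment case: p must already be a dict with one propensity
# vector per treatment group.
treatment_2 = np.array(['control', 'treatment_a', 'treatment_b', 'treatment_a'])
p_2 = {
    'treatment_a': np.array([0.30, 0.55, 0.25, 0.50]),
    'treatment_b': np.array([0.10, 0.15, 0.40, 0.20]),
}
```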
diff --git a/causalml/inference/meta/xlearner.py b/causalml/inference/meta/xlearner.py index 63d84f64..72889083 100644 --- a/causalml/inference/meta/xlearner.py +++ b/causalml/inference/meta/xlearner.py @@ -1,12 +1,13 @@ from __future__ import absolute_import from __future__ import division from __future__ import print_function +from future.builtins import super from copy import deepcopy import logging import pandas as pd import numpy as np from scipy.stats import norm - +from .utils import check_control_in_treatment, check_p_conditions logger = logging.getLogger('causalml') @@ -67,9 +68,6 @@ def __init__(self, self.ate_alpha = ate_alpha self.control_name = control_name - self.t_var = 0.0 - self.c_var = 0.0 - def __repr__(self): return ('{}(control_outcome_learner={},\n' '\ttreatment_outcome_learner={},\n' @@ -88,55 +86,78 @@ def fit(self, X, treatment, y): treatment (np.array): a treatment vector y (np.array): an outcome vector """ - is_treatment = treatment != self.control_name - - t_groups = np.unique(treatment[is_treatment]) - self._classes = {} - self._classes[t_groups[0]] = 0 # this should be updated for multi-treatment case - - logger.info('Training the control group outcome model') - self.model_mu_c.fit(X[~is_treatment], y[~is_treatment]) - - logger.info('Training the treatment group outcome model') - self.model_mu_t.fit(X[is_treatment], y[is_treatment]) - - # Calculate variances and treatment effects - self.c_var = (y[~is_treatment] - self.model_mu_c.predict( - X[~is_treatment])).var() - self.t_var = (y[is_treatment] - self.model_mu_t.predict( - X[is_treatment])).var() - - d_c = self.model_mu_t.predict(X[~is_treatment]) - y[~is_treatment] - d_t = y[is_treatment] - self.model_mu_c.predict(X[is_treatment]) - - logger.info('Training the control group treatment model') - self.model_tau_c.fit(X[~is_treatment], d_c) - - logger.info('Training the treatment group treatment model') - self.model_tau_t.fit(X[is_treatment], d_t) - - def predict(self, X, p): + check_control_in_treatment(treatment, self.control_name) + self.t_groups = np.unique(treatment[treatment != self.control_name]) + self.t_groups.sort() + self._classes = {group: i for i, group in enumerate(self.t_groups)} + self.models_mu_c = {group: deepcopy(self.model_mu_c) for group in self.t_groups} + self.models_mu_t = {group: deepcopy(self.model_mu_t) for group in self.t_groups} + self.models_tau_c = {group: deepcopy(self.model_tau_c) for group in self.t_groups} + self.models_tau_t = {group: deepcopy(self.model_tau_t) for group in self.t_groups} + self.vars_c = {} + self.vars_t = {} + + for group in self.t_groups: + w = (treatment == group).astype(int) + + # Train outcome models + self.models_mu_c[group].fit(X[w == 0], y[w == 0]) + self.models_mu_t[group].fit(X[w == 1], y[w == 1]) + + # Calculate variances and treatment effects + var_c = (y[w == 0] - self.models_mu_c[group].predict(X[w == 0])).var() + self.vars_c[group] = var_c + var_t = (y[w == 1] - self.models_mu_t[group].predict(X[w == 1])).var() + self.vars_t[group] = var_t + + # Train treatment models + d_c = self.models_mu_t[group].predict(X[w == 0]) - y[w == 0] + d_t = y[w == 1] - self.models_mu_c[group].predict(X[w == 1]) + self.models_tau_c[group].fit(X[w == 0], d_c) + self.models_tau_t[group].fit(X[w == 1], d_t) + + def predict(self, X, p, return_components=False): """Predict treatment effects. 
Args: X (np.matrix): a feature matrix - p (np.array): a propensity vector of float between 0 and 1 + p (np.ndarray or dict): an array of propensity scores of float (0,1) in the single-treatment case + or, a dictionary of treatment groups that map to propensity vectors of float (0,1) Returns: (numpy.ndarray): Predictions of treatment effects. """ + check_p_conditions(p, self.t_groups) + if isinstance(p, np.ndarray): + treatment_name = self.t_groups[0] + p = {treatment_name: p} - dhat_c = self.model_tau_c.predict(X) - dhat_t = self.model_tau_t.predict(X) + te = np.zeros((X.shape[0], self.t_groups.shape[0])) + dhat_cs = {} + dhat_ts = {} - return (p * dhat_c + (1 - p) * dhat_t).reshape(-1, 1) + for i, group in enumerate(self.t_groups): + model_tau_c = self.models_tau_c[group] + model_tau_t = self.models_tau_t[group] + dhat_cs[group] = model_tau_c.predict(X) + dhat_ts[group] = model_tau_t.predict(X) - def fit_predict(self, X, p, treatment, y, return_ci=False, n_bootstraps=1000, bootstrap_size=10000, verbose=False): + _te = (p[group] * dhat_cs[group] + (1 - p[group]) * dhat_cs[group]).reshape(-1, 1) + te[:, i] = np.ravel(_te) + + if not return_components: + return te + else: + return te, dhat_cs, dhat_ts + + def fit_predict(self, X, p, treatment, y, return_ci=False, n_bootstraps=1000, bootstrap_size=10000, + return_components=False, verbose=True): """Fit the treatment effect and outcome models of the R learner and predict treatment effects. Args: X (np.matrix): a feature matrix - p (np.array): a propensity vector between 0 and 1 + p (np.ndarray or dict): an array of propensity scores of float (0,1) in the single-treatment case + or, a dictionary of treatment groups that map to propensity vectors of float (0,1) treatment (np.array): a treatment vector y (np.array): an outcome vector return_ci (bool): whether to return confidence intervals @@ -150,23 +171,37 @@ def fit_predict(self, X, p, treatment, y, return_ci=False, n_bootstraps=1000, bo UB [n_samples, n_treatment] """ self.fit(X, treatment, y) - te = self.predict(X, p) + te = self.predict(X, p, return_components=return_components) if not return_ci: return te else: start = pd.datetime.today() - te_bootstraps = np.zeros(shape=(X.shape[0], n_bootstraps)) + self.t_groups_global = self.t_groups + self._classes_global = self._classes + self.models_mu_c_global = deepcopy(self.models_mu_c) + self.models_mu_t_global = deepcopy(self.models_mu_t) + self.models_tau_c_global = deepcopy(self.models_tau_c) + self.models_tau_t_global = deepcopy(self.models_tau_t) + te_bootstraps = np.zeros(shape=(X.shape[0], self.t_groups.shape[0], n_bootstraps)) for i in range(n_bootstraps): te_b = self.bootstrap(X, p, treatment, y, size=bootstrap_size) - te_bootstraps[:, i] = np.ravel(te_b) - if verbose: + te_bootstraps[:, :, i] = te_b + if verbose and i % 10 == 0 and i > 0: now = pd.datetime.today() - lapsed = (now - start).seconds / 60 - logger.info('{}/{} bootstraps completed. ({:.01f} min lapsed)'.format(i + 1, n_bootstraps, lapsed)) + lapsed = (now-start).seconds + logger.info('{}/{} bootstraps completed. 
({}s lapsed)'.format(i, n_bootstraps, lapsed)) + + te_lower = np.percentile(te_bootstraps, (self.ate_alpha / 2) * 100, axis=2) + te_upper = np.percentile(te_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=2) - te_lower = np.percentile(te_bootstraps, (self.ate_alpha / 2) * 100, axis=1) - te_upper = np.percentile(te_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=1) + # set member variables back to global (currently last bootstrapped outcome) + self.t_groups = self.t_groups_global + self._classes = self._classes_global + self.models_mu_c = self.models_mu_c_global + self.models_mu_t = self.models_mu_t_global + self.models_tau_c = self.models_tau_c_global + self.models_tau_t = self.models_tau_t_global return (te, te_lower, te_upper) @@ -175,36 +210,47 @@ def estimate_ate(self, X, p, treatment, y): Args: X (np.matrix): a feature matrix - p (np.array): a propensity vector between 0 and 1 + p (np.ndarray or dict): an array of propensity scores of float (0,1) in the single-treatment case + or, a dictionary of treatment groups that map to propensity vectors of float (0,1) treatment (np.array): a treatment vector y (np.array): an outcome vector Returns: The mean and confidence interval (LB, UB) of the ATE estimate. """ - is_treatment = treatment != self.control_name - w = is_treatment.astype(int) + te, dhat_cs, dhat_ts = self.fit_predict(X, p, treatment, y, return_components=True) - self.fit(X, treatment, y) + check_p_conditions(p, self.t_groups) + if isinstance(p, np.ndarray): + treatment_name = treatment_name = self.t_groups[0] + p = {treatment_name: p} - prob_treatment = float(sum(w)) / X.shape[0] + ate = np.zeros(self.t_groups.shape[0]) + ate_lb = np.zeros(self.t_groups.shape[0]) + ate_ub = np.zeros(self.t_groups.shape[0]) - dhat_c = self.model_tau_c.predict(X) - dhat_t = self.model_tau_t.predict(X) + for i, group in enumerate(self.t_groups): + w = (treatment == group).astype(int) + prob_treatment = float(sum(w)) / X.shape[0] + _ate = te[:, i].mean() + dhat_c = dhat_cs[group] + dhat_t = dhat_ts[group] - te = (p * dhat_c + (1 - p) * dhat_t).mean() + # SE formula is based on the lower bound formula (7) from Imbens, Guido W., and Jeffrey M. Wooldridge. 2009. + # "Recent Developments in the Econometrics of Program Evaluation." Journal of Economic Literature + se = np.sqrt(( + self.vars_t[group] / prob_treatment + self.vars_c[group] / (1 - prob_treatment) + + (p[group] * dhat_c + (1 - p[group]) * dhat_t).var() + ) / X.shape[0]) - # SE formula is based on the lower bound formula (7) from Imbens, Guido W., and Jeffrey M. Wooldridge. 2009. - # "Recent Developments in the Econometrics of Program Evaluation." Journal of Economic Literature - se = np.sqrt(( - self.t_var / prob_treatment + self.c_var / (1 - prob_treatment) + - (p * dhat_c + (1 - p) * dhat_t).var() - ) / X.shape[0]) + _ate_lb = _ate - se * norm.ppf(1 - self.ate_alpha / 2) + _ate_ub = _ate + se * norm.ppf(1 - self.ate_alpha / 2) - te_lb = te - se * norm.ppf(1 - self.ate_alpha / 2) - te_ub = te + se * norm.ppf(1 - self.ate_alpha / 2) + ate[i] = _ate + ate_lb[i] = _ate_lb + ate_ub[i] = _ate_ub - return te, te_lb, te_ub + return ate, ate_lb, ate_ub def bootstrap(self, X, p, treatment, y, size=10000): """Runs a single bootstrap. 
Fits on bootstrapped sample, then predicts on whole population.""" @@ -242,7 +288,7 @@ def __init__(self, ate_alpha (float, optional): the confidence level alpha of the ATE estimate control_name (str or int, optional): name of control group """ - super(BaseXRegressor, self).__init__( + super().__init__( learner=learner, control_outcome_learner=control_outcome_learner, treatment_outcome_learner=treatment_outcome_learner, @@ -280,7 +326,7 @@ def __init__(self, ate_alpha (float, optional): the confidence level alpha of the ATE estimate control_name (str or int, optional): name of control group """ - super(BaseXClassifier, self).__init__( + super().__init__( learner=learner, control_outcome_learner=control_outcome_learner, treatment_outcome_learner=treatment_outcome_learner, @@ -301,29 +347,66 @@ def fit(self, X, treatment, y): treatment (np.array): a treatment vector y (np.array): an outcome vector """ - is_treatment = treatment != self.control_name - - t_groups = np.unique(treatment[is_treatment]) - self._classes = {} - self._classes[t_groups[0]] = 0 # this should be updated for multi-treatment case + check_control_in_treatment(treatment, self.control_name) + self.t_groups = np.unique(treatment[treatment != self.control_name]) + self.t_groups.sort() + self._classes = {group: i for i, group in enumerate(self.t_groups)} + self.models_mu_c = {group: deepcopy(self.model_mu_c) for group in self.t_groups} + self.models_mu_t = {group: deepcopy(self.model_mu_t) for group in self.t_groups} + self.models_tau_c = {group: deepcopy(self.model_tau_c) for group in self.t_groups} + self.models_tau_t = {group: deepcopy(self.model_tau_t) for group in self.t_groups} + self.vars_c = {} + self.vars_t = {} + + for group in self.t_groups: + w = (treatment == group).astype(int) + + # Train outcome models + self.models_mu_c[group].fit(X[w == 0], y[w == 0]) + self.models_mu_t[group].fit(X[w == 1], y[w == 1]) + + # Calculate variances and treatment effects + var_c = (y[w == 0] - self.models_mu_c[group].predict_proba(X[w == 0])[:, 1]).var() + self.vars_c[group] = var_c + var_t = (y[w == 1] - self.models_mu_t[group].predict_proba(X[w == 1])[:, 1]).var() + self.vars_t[group] = var_t + + # Train treatment models + d_c = self.models_mu_t[group].predict_proba(X[w == 0])[:, 1] - y[w == 0] + d_t = y[w == 1] - self.models_mu_c[group].predict_proba(X[w == 1])[:, 1] + self.models_tau_c[group].fit(X[w == 0], d_c) + self.models_tau_t[group].fit(X[w == 1], d_t) + + def predict(self, X, p, return_components=False): + """Predict treatment effects. - logger.info('Training the control group outcome model') - self.model_mu_c.fit(X[~is_treatment], y[~is_treatment]) + Args: + X (np.matrix): a feature matrix + p (np.ndarray or dict): an array of propensity scores of float (0,1) in the single-treatment case + or, a dictionary of treatment groups that map to propensity vectors of float (0,1) - logger.info('Training the treatment group outcome model') - self.model_mu_t.fit(X[is_treatment], y[is_treatment]) + Returns: + (numpy.ndarray): Predictions of treatment effects. 
+ """ + check_p_conditions(p, self.t_groups) + if isinstance(p, np.ndarray): + treatment_name = self.t_groups[0] + p = {treatment_name: p} - # Calculate variances and treatment effects - self.c_var = (y[~is_treatment] - self.model_mu_c.predict_proba( - X[~is_treatment])[:, 1]).var() - self.t_var = (y[is_treatment] - self.model_mu_t.predict_proba( - X[is_treatment])[:, 1]).var() + te = np.zeros((X.shape[0], self.t_groups.shape[0])) + dhat_cs = {} + dhat_ts = {} - d_c = self.model_mu_t.predict_proba(X[~is_treatment])[:, 1] - y[~is_treatment] - d_t = y[is_treatment] - self.model_mu_c.predict_proba(X[is_treatment])[:, 1] + for i, group in enumerate(self.t_groups): + model_tau_c = self.models_tau_c[group] + model_tau_t = self.models_tau_t[group] + dhat_cs[group] = model_tau_c.predict(X) + dhat_ts[group] = model_tau_t.predict(X) - logger.info('Training the control group treatment model') - self.model_tau_c.fit(X[~is_treatment], d_c) + _te = (p[group] * dhat_cs[group] + (1 - p[group]) * dhat_cs[group]).reshape(-1, 1) + te[:, i] = np.ravel(_te) - logger.info('Training the treatment group treatment model') - self.model_tau_t.fit(X[is_treatment], d_t) + if not return_components: + return te + else: + return te, dhat_cs, dhat_ts diff --git a/examples/meta_learners_with_synthetic_data_multiple_treatment.ipynb b/examples/meta_learners_with_synthetic_data_multiple_treatment.ipynb new file mode 100644 index 00000000..39e3f3d5 --- /dev/null +++ b/examples/meta_learners_with_synthetic_data_multiple_treatment.ipynb @@ -0,0 +1,2254 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# `causalml` - Meta-Learner Example Notebook\n", + "This notebook only contains regression examples." + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [], + "source": [ + "# pick the right base path (only run ONCE)\n", + "import os\n", + "base_path = os.path.abspath(\"../causalml\")\n", + "os.chdir(base_path)" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "%reload_ext autoreload\n", + "%autoreload 2" + ] + }, + { + "cell_type": "code", + "execution_count": 255, + "metadata": {}, + "outputs": [], + "source": [ + "import pandas as pd\n", + "import numpy as np\n", + "from matplotlib import pyplot as plt\n", + "from sklearn.linear_model import LinearRegression, LogisticRegression\n", + "from sklearn.model_selection import train_test_split\n", + "import statsmodels.api as sm\n", + "from xgboost import XGBRegressor, XGBClassifier\n", + "import warnings\n", + "\n", + "# from causalml.inference.meta import XGBTLearner, MLPTLearner\n", + "from inference.meta import BaseSRegressor, BaseTRegressor, BaseXRegressor, BaseRRegressor\n", + "from inference.meta import BaseSClassifier, BaseTClassifier, BaseXClassifier, BaseRClassifier\n", + "from inference.meta import LRSRegressor\n", + "from causalml.match import NearestNeighborMatch, MatchOptimizer, create_table_one\n", + "from causalml.propensity import ElasticNetPropensityModel\n", + "from causalml.dataset import *\n", + "from causalml.metrics import *\n", + "\n", + "warnings.filterwarnings('ignore')\n", + "plt.style.use('fivethirtyeight')\n", + "pd.set_option('display.float_format', lambda x: '%.4f' % x)\n", + "\n", + "# imports from package\n", + "import logging\n", + "from sklearn.dummy import DummyRegressor\n", + "from sklearn.metrics import mean_squared_error as mse\n", + "from sklearn.metrics import mean_absolute_error as mae\n", + "import 
statsmodels.api as sm\n", + "from copy import deepcopy\n", + "\n", + "logger = logging.getLogger('causalml')\n", + "logging.basicConfig(level=logging.INFO)\n", + "\n", + "%matplotlib inline" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Single Treatment Case" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Generate synthetic data" + ] + }, + { + "cell_type": "code", + "execution_count": 97, + "metadata": {}, + "outputs": [], + "source": [ + "# Generate synthetic data using mode 1\n", + "y, X, treatment, tau, b, e = synthetic_data(mode=1, n=10000, p=8, sigma=1.0)\n", + "\n", + "treatment = np.array(['treatment_a' if val==1 else 'control' for val in treatment])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## S-Learner" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### ATE" + ] + }, + { + "cell_type": "code", + "execution_count": 98, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:causalml:Error metrics for treatment_a\n", + "INFO:causalml:RMSE (Control): 0.958908\n", + "INFO:causalml: MAE (Control): 0.763500\n", + "INFO:causalml:RMSE (Treatment): 0.962411\n", + "INFO:causalml: MAE (Treatment): 0.765833\n" + ] + } + ], + "source": [ + "learner_s = BaseSRegressor(XGBRegressor(), control_name='control')\n", + "ate_s = learner_s.estimate_ate(X=X, treatment=treatment, y=y, return_ci=False)" + ] + }, + { + "cell_type": "code", + "execution_count": 99, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([0.55668548])" + ] + }, + "execution_count": 99, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ate_s" + ] + }, + { + "cell_type": "code", + "execution_count": 100, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'treatment_a': 0}" + ] + }, + "execution_count": 100, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "learner_s._classes" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### ATE w/ Confidence Intervals" + ] + }, + { + "cell_type": "code", + "execution_count": 103, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:causalml:Error metrics for treatment_a\n", + "INFO:causalml:RMSE (Control): 0.958908\n", + "INFO:causalml: MAE (Control): 0.763500\n", + "INFO:causalml:RMSE (Treatment): 0.962411\n", + "INFO:causalml: MAE (Treatment): 0.765833\n", + "INFO:causalml:11/100 bootstraps completed. (3s lapsed)\n", + "INFO:causalml:21/100 bootstraps completed. (5s lapsed)\n", + "INFO:causalml:31/100 bootstraps completed. (8s lapsed)\n", + "INFO:causalml:41/100 bootstraps completed. (11s lapsed)\n", + "INFO:causalml:51/100 bootstraps completed. (14s lapsed)\n", + "INFO:causalml:61/100 bootstraps completed. (17s lapsed)\n", + "INFO:causalml:71/100 bootstraps completed. (20s lapsed)\n", + "INFO:causalml:81/100 bootstraps completed. (22s lapsed)\n", + "INFO:causalml:91/100 bootstraps completed. 
(25s lapsed)\n" + ] + } + ], + "source": [ + "alpha = 0.05\n", + "learner_s = BaseSRegressor(XGBRegressor(), ate_alpha=alpha, control_name='control')\n", + "ate_s, ate_s_lb, ate_s_ub = learner_s.estimate_ate(X=X, treatment=treatment, y=y, return_ci=True,\n", + " n_bootstraps=100, bootstrap_size=5000)" + ] + }, + { + "cell_type": "code", + "execution_count": 104, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[0.32128951],\n", + " [0.55668548],\n", + " [0.77378568]])" + ] + }, + "execution_count": 104, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "np.vstack((ate_s_lb, ate_s, ate_s_ub))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### CATE" + ] + }, + { + "cell_type": "code", + "execution_count": 105, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:causalml:Error metrics for treatment_a\n", + "INFO:causalml:RMSE (Control): 0.958908\n", + "INFO:causalml: MAE (Control): 0.763500\n", + "INFO:causalml:RMSE (Treatment): 0.962411\n", + "INFO:causalml: MAE (Treatment): 0.765833\n" + ] + } + ], + "source": [ + "learner_s = BaseSRegressor(XGBRegressor(), control_name='control')\n", + "cate_s = learner_s.fit_predict(X=X, treatment=treatment, y=y, return_ci=False)" + ] + }, + { + "cell_type": "code", + "execution_count": 106, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[0.03796911],\n", + " [0.69114912],\n", + " [0.05045927],\n", + " ...,\n", + " [0.0114069 ],\n", + " [0.78504539],\n", + " [0.54530191]])" + ] + }, + "execution_count": 106, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cate_s" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### CATE w/ Confidence Intervals" + ] + }, + { + "cell_type": "code", + "execution_count": 107, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:causalml:Error metrics for treatment_a\n", + "INFO:causalml:RMSE (Control): 0.958908\n", + "INFO:causalml: MAE (Control): 0.763500\n", + "INFO:causalml:RMSE (Treatment): 0.962411\n", + "INFO:causalml: MAE (Treatment): 0.765833\n", + "INFO:causalml:11/100 bootstraps completed. (3s lapsed)\n", + "INFO:causalml:21/100 bootstraps completed. (5s lapsed)\n", + "INFO:causalml:31/100 bootstraps completed. (8s lapsed)\n", + "INFO:causalml:41/100 bootstraps completed. (11s lapsed)\n", + "INFO:causalml:51/100 bootstraps completed. (14s lapsed)\n", + "INFO:causalml:61/100 bootstraps completed. (16s lapsed)\n", + "INFO:causalml:71/100 bootstraps completed. (19s lapsed)\n", + "INFO:causalml:81/100 bootstraps completed. (22s lapsed)\n", + "INFO:causalml:91/100 bootstraps completed. 
(25s lapsed)\n" + ] + } + ], + "source": [ + "alpha = 0.05\n", + "learner_s = BaseSRegressor(XGBRegressor(), ate_alpha=alpha, control_name='control')\n", + "cate_s, cate_s_lb, cate_s_ub = learner_s.fit_predict(X=X, treatment=treatment, y=y, return_ci=True,\n", + " n_bootstraps=100, bootstrap_size=5000)" + ] + }, + { + "cell_type": "code", + "execution_count": 108, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[0.03796911],\n", + " [0.69114912],\n", + " [0.05045927],\n", + " ...,\n", + " [0.0114069 ],\n", + " [0.78504539],\n", + " [0.54530191]])" + ] + }, + "execution_count": 108, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cate_s" + ] + }, + { + "cell_type": "code", + "execution_count": 109, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[-0.14735703],\n", + " [ 0.54512397],\n", + " [-0.13866318],\n", + " ...,\n", + " [-0.23455577],\n", + " [ 0.52985432],\n", + " [ 0.27041314]])" + ] + }, + "execution_count": 109, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cate_s_lb" + ] + }, + { + "cell_type": "code", + "execution_count": 110, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[0.24406555],\n", + " [0.91976507],\n", + " [0.47135236],\n", + " ...,\n", + " [0.45338835],\n", + " [0.88356948],\n", + " [0.8016798 ]])" + ] + }, + "execution_count": 110, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cate_s_ub" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## T-Learner" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### ATE w/ Confidence Intervals" + ] + }, + { + "cell_type": "code", + "execution_count": 111, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:causalml:Error metrics for treatment_a\n", + "INFO:causalml:RMSE (Control): 0.929569\n", + "INFO:causalml: MAE (Control): 0.739058\n", + "INFO:causalml:RMSE (Treatment): 0.927878\n", + "INFO:causalml: MAE (Treatment): 0.737494\n" + ] + } + ], + "source": [ + "learner_t = BaseTRegressor(XGBRegressor(), control_name='control')\n", + "ate_t, ate_t_lb, ate_t_ub = learner_t.estimate_ate(X=X, treatment=treatment, y=y)" + ] + }, + { + "cell_type": "code", + "execution_count": 159, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[0.50665447],\n", + " [0.54399855],\n", + " [0.58134264]])" + ] + }, + "execution_count": 159, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "np.vstack((ate_t_lb, ate_t, ate_t_ub))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### CATE" + ] + }, + { + "cell_type": "code", + "execution_count": 115, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:causalml:Error metrics for treatment_a\n", + "INFO:causalml:RMSE (Control): 0.929569\n", + "INFO:causalml: MAE (Control): 0.739058\n", + "INFO:causalml:RMSE (Treatment): 0.927878\n", + "INFO:causalml: MAE (Treatment): 0.737494\n" + ] + } + ], + "source": [ + "learner_t = BaseTRegressor(XGBRegressor(), control_name='control')\n", + "cate_t = learner_t.fit_predict(X=X, treatment=treatment, y=y)" + ] + }, + { + "cell_type": "code", + "execution_count": 116, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[-0.38664007],\n", + " [ 0.87922645],\n", + " [-0.00171328],\n", + " ...,\n", + " [ 0.43162906],\n", + " [ 0.9598496 
],\n", + " [ 0.50931144]])" + ] + }, + "execution_count": 116, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cate_t" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### CATE w/ Confidence Intervals" + ] + }, + { + "cell_type": "code", + "execution_count": 117, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:causalml:Error metrics for treatment_a\n", + "INFO:causalml:RMSE (Control): 0.929569\n", + "INFO:causalml: MAE (Control): 0.739058\n", + "INFO:causalml:RMSE (Treatment): 0.927878\n", + "INFO:causalml: MAE (Treatment): 0.737494\n", + "INFO:causalml:10/100 bootstraps completed. (3s lapsed)\n", + "INFO:causalml:20/100 bootstraps completed. (6s lapsed)\n", + "INFO:causalml:30/100 bootstraps completed. (8s lapsed)\n", + "INFO:causalml:40/100 bootstraps completed. (11s lapsed)\n", + "INFO:causalml:50/100 bootstraps completed. (14s lapsed)\n", + "INFO:causalml:60/100 bootstraps completed. (17s lapsed)\n", + "INFO:causalml:70/100 bootstraps completed. (20s lapsed)\n", + "INFO:causalml:80/100 bootstraps completed. (23s lapsed)\n", + "INFO:causalml:90/100 bootstraps completed. (25s lapsed)\n" + ] + } + ], + "source": [ + "learner_t = BaseTRegressor(XGBRegressor(), control_name='control')\n", + "cate_t, cate_t_lb, cate_t_ub = learner_t.fit_predict(X=X, treatment=treatment, y=y, return_ci=True, n_bootstraps=100,\n", + " bootstrap_size=5000)" + ] + }, + { + "cell_type": "code", + "execution_count": 119, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[-0.38664007],\n", + " [ 0.87922645],\n", + " [-0.00171328],\n", + " ...,\n", + " [ 0.43162906],\n", + " [ 0.9598496 ],\n", + " [ 0.50931144]])" + ] + }, + "execution_count": 119, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cate_t" + ] + }, + { + "cell_type": "code", + "execution_count": 120, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[-1.10967693],\n", + " [ 0.61126621],\n", + " [-0.74953743],\n", + " ...,\n", + " [-0.79142535],\n", + " [ 0.51201607],\n", + " [-0.5868047 ]])" + ] + }, + "execution_count": 120, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cate_t_lb" + ] + }, + { + "cell_type": "code", + "execution_count": 121, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[0.15802083],\n", + " [1.22920619],\n", + " [0.58200101],\n", + " ...,\n", + " [1.52186333],\n", + " [1.2845089 ],\n", + " [0.95881709]])" + ] + }, + "execution_count": 121, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cate_t_ub" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## X-Learner" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### ATE w/ Confidence Intervals" + ] + }, + { + "cell_type": "code", + "execution_count": 144, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array(['treatment_a'], dtype=' 0.2 else 'treatment_b') \n", + " if val==1 else 'control' for val in treatment])\n", + "\n", + "e = {group: e for group in np.unique(treatment)}" + ] + }, + { + "cell_type": "code", + "execution_count": 173, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "control 4820\n", + "treatment_a 4110\n", + "treatment_b 1070\n", + "dtype: int64" + ] + }, + "execution_count": 173, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "pd.Series(treatment).value_counts()" 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## S-Learner" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### ATE" + ] + }, + { + "cell_type": "code", + "execution_count": 174, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:causalml:Error metrics for treatment_a\n", + "INFO:causalml:RMSE (Control): 0.991463\n", + "INFO:causalml: MAE (Control): 0.787094\n", + "INFO:causalml:RMSE (Treatment): 0.970982\n", + "INFO:causalml: MAE (Treatment): 0.774108\n", + "INFO:causalml:Error metrics for treatment_b\n", + "INFO:causalml:RMSE (Control): 0.998294\n", + "INFO:causalml: MAE (Control): 0.794750\n", + "INFO:causalml:RMSE (Treatment): 0.919352\n", + "INFO:causalml: MAE (Treatment): 0.733635\n" + ] + } + ], + "source": [ + "learner_s = BaseSRegressor(XGBRegressor(), control_name='control')\n", + "ate_s = learner_s.estimate_ate(X=X, treatment=treatment, y=y, return_ci=False)" + ] + }, + { + "cell_type": "code", + "execution_count": 175, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([0.35913358, 0.18256349])" + ] + }, + "execution_count": 175, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "ate_s" + ] + }, + { + "cell_type": "code", + "execution_count": 176, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'treatment_a': 0, 'treatment_b': 1}" + ] + }, + "execution_count": 176, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "learner_s._classes" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### ATE w/ Confidence Intervals\n", + "Note: S-Learner is the only learner that uses bootstrapping to get confidence intervals." + ] + }, + { + "cell_type": "code", + "execution_count": 178, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:causalml:Error metrics for treatment_a\n", + "INFO:causalml:RMSE (Control): 0.991463\n", + "INFO:causalml: MAE (Control): 0.787094\n", + "INFO:causalml:RMSE (Treatment): 0.970982\n", + "INFO:causalml: MAE (Treatment): 0.774108\n", + "INFO:causalml:Error metrics for treatment_b\n", + "INFO:causalml:RMSE (Control): 0.998294\n", + "INFO:causalml: MAE (Control): 0.794750\n", + "INFO:causalml:RMSE (Treatment): 0.919352\n", + "INFO:causalml: MAE (Treatment): 0.733635\n", + "INFO:causalml:11/100 bootstraps completed. (6s lapsed)\n", + "INFO:causalml:21/100 bootstraps completed. (11s lapsed)\n", + "INFO:causalml:31/100 bootstraps completed. (16s lapsed)\n", + "INFO:causalml:41/100 bootstraps completed. (22s lapsed)\n", + "INFO:causalml:51/100 bootstraps completed. (27s lapsed)\n", + "INFO:causalml:61/100 bootstraps completed. (33s lapsed)\n", + "INFO:causalml:71/100 bootstraps completed. (38s lapsed)\n", + "INFO:causalml:81/100 bootstraps completed. (44s lapsed)\n", + "INFO:causalml:91/100 bootstraps completed. 
(49s lapsed)\n" + ] + } + ], + "source": [ + "alpha = 0.05\n", + "learner_s = BaseSRegressor(XGBRegressor(), ate_alpha=alpha, control_name='control')\n", + "ate_s, ate_s_lb, ate_s_ub = learner_s.estimate_ate(X=X, treatment=treatment, y=y, return_ci=True,\n", + " n_bootstraps=100, bootstrap_size=5000)" + ] + }, + { + "cell_type": "code", + "execution_count": 179, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[ 0.14345376, -0.03237222],\n", + " [ 0.35913358, 0.18256349],\n", + " [ 0.5507962 , 0.37659037]])" + ] + }, + "execution_count": 179, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "np.vstack((ate_s_lb, ate_s, ate_s_ub))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### CATE" + ] + }, + { + "cell_type": "code", + "execution_count": 180, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:causalml:Error metrics for treatment_a\n", + "INFO:causalml:RMSE (Control): 0.991463\n", + "INFO:causalml: MAE (Control): 0.787094\n", + "INFO:causalml:RMSE (Treatment): 0.970982\n", + "INFO:causalml: MAE (Treatment): 0.774108\n", + "INFO:causalml:Error metrics for treatment_b\n", + "INFO:causalml:RMSE (Control): 0.998294\n", + "INFO:causalml: MAE (Control): 0.794750\n", + "INFO:causalml:RMSE (Treatment): 0.919352\n", + "INFO:causalml: MAE (Treatment): 0.733635\n" + ] + } + ], + "source": [ + "learner_s = BaseSRegressor(XGBRegressor(), control_name='control')\n", + "cate_s = learner_s.fit_predict(X=X, treatment=treatment, y=y, return_ci=False)" + ] + }, + { + "cell_type": "code", + "execution_count": 181, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[0.29305696, 0.20715189],\n", + " [0.2394737 , 0.23082519],\n", + " [0.45016623, 0.13159585],\n", + " ...,\n", + " [0.44311321, 0.15472066],\n", + " [0.39511681, 0.15956807],\n", + " [0.22280121, 0.14637351]])" + ] + }, + "execution_count": 181, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cate_s" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### CATE w/ Confidence Intervals" + ] + }, + { + "cell_type": "code", + "execution_count": 182, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:causalml:Error metrics for treatment_a\n", + "INFO:causalml:RMSE (Control): 0.991463\n", + "INFO:causalml: MAE (Control): 0.787094\n", + "INFO:causalml:RMSE (Treatment): 0.970982\n", + "INFO:causalml: MAE (Treatment): 0.774108\n", + "INFO:causalml:Error metrics for treatment_b\n", + "INFO:causalml:RMSE (Control): 0.998294\n", + "INFO:causalml: MAE (Control): 0.794750\n", + "INFO:causalml:RMSE (Treatment): 0.919352\n", + "INFO:causalml: MAE (Treatment): 0.733635\n", + "INFO:causalml:11/100 bootstraps completed. (6s lapsed)\n", + "INFO:causalml:21/100 bootstraps completed. (11s lapsed)\n", + "INFO:causalml:31/100 bootstraps completed. (17s lapsed)\n", + "INFO:causalml:41/100 bootstraps completed. (22s lapsed)\n", + "INFO:causalml:51/100 bootstraps completed. (28s lapsed)\n", + "INFO:causalml:61/100 bootstraps completed. (33s lapsed)\n", + "INFO:causalml:71/100 bootstraps completed. (39s lapsed)\n", + "INFO:causalml:81/100 bootstraps completed. (44s lapsed)\n", + "INFO:causalml:91/100 bootstraps completed. 
(49s lapsed)\n" + ] + } + ], + "source": [ + "alpha = 0.05\n", + "learner_s = BaseSRegressor(XGBRegressor(), ate_alpha=alpha, control_name='control')\n", + "cate_s, cate_s_lb, cate_s_ub = learner_s.fit_predict(X=X, treatment=treatment, y=y, return_ci=True,\n", + " n_bootstraps=100, bootstrap_size=5000)" + ] + }, + { + "cell_type": "code", + "execution_count": 183, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[0.29305696, 0.20715189],\n", + " [0.2394737 , 0.23082519],\n", + " [0.45016623, 0.13159585],\n", + " ...,\n", + " [0.44311321, 0.15472066],\n", + " [0.39511681, 0.15956807],\n", + " [0.22280121, 0.14637351]])" + ] + }, + "execution_count": 183, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cate_s" + ] + }, + { + "cell_type": "code", + "execution_count": 184, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[-0.04259078, 0.03480431],\n", + " [-0.05326713, 0.048358 ],\n", + " [ 0.27760496, -0.06061491],\n", + " ...,\n", + " [ 0.28635021, 0.02252018],\n", + " [ 0.19727507, -0.03052257],\n", + " [ 0.07766245, -0.01838151]])" + ] + }, + "execution_count": 184, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cate_s_lb" + ] + }, + { + "cell_type": "code", + "execution_count": 185, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[0.44047487, 0.33187835],\n", + " [0.45214516, 0.34100947],\n", + " [0.61380313, 0.31094917],\n", + " ...,\n", + " [0.60039392, 0.33422943],\n", + " [0.61935563, 0.25369784],\n", + " [0.52657488, 0.42135347]])" + ] + }, + "execution_count": 185, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cate_s_ub" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## T-Learner" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### ATE w/ Confidence Intervals" + ] + }, + { + "cell_type": "code", + "execution_count": 186, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:causalml:Error metrics for treatment_a\n", + "INFO:causalml:RMSE (Control): 0.971329\n", + "INFO:causalml: MAE (Control): 0.769895\n", + "INFO:causalml:RMSE (Treatment): 0.934618\n", + "INFO:causalml: MAE (Treatment): 0.743548\n", + "INFO:causalml:Error metrics for treatment_b\n", + "INFO:causalml:RMSE (Control): 0.993107\n", + "INFO:causalml: MAE (Control): 0.789818\n", + "INFO:causalml:RMSE (Treatment): 0.796908\n", + "INFO:causalml: MAE (Treatment): 0.628517\n" + ] + } + ], + "source": [ + "learner_t = BaseTRegressor(XGBRegressor(), control_name='control')\n", + "ate_t, ate_t_lb, ate_t_ub = learner_t.estimate_ate(X=X, treatment=treatment, y=y)" + ] + }, + { + "cell_type": "code", + "execution_count": 187, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[0.32926924, 0.19934954],\n", + " [0.36743379, 0.25176689],\n", + " [0.40559833, 0.30418423]])" + ] + }, + "execution_count": 187, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "np.vstack((ate_t_lb, ate_t, ate_t_ub))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### CATE" + ] + }, + { + "cell_type": "code", + "execution_count": 188, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:causalml:Error metrics for treatment_a\n", + "INFO:causalml:RMSE (Control): 0.971329\n", + "INFO:causalml: MAE (Control): 0.769895\n", + "INFO:causalml:RMSE 
(Treatment): 0.934618\n", + "INFO:causalml: MAE (Treatment): 0.743548\n", + "INFO:causalml:Error metrics for treatment_b\n", + "INFO:causalml:RMSE (Control): 0.993107\n", + "INFO:causalml: MAE (Control): 0.789818\n", + "INFO:causalml:RMSE (Treatment): 0.796908\n", + "INFO:causalml: MAE (Treatment): 0.628517\n" + ] + } + ], + "source": [ + "learner_t = BaseTRegressor(XGBRegressor(), control_name='control')\n", + "cate_t = learner_t.fit_predict(X=X, treatment=treatment, y=y)" + ] + }, + { + "cell_type": "code", + "execution_count": 189, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[ 0.28441787, 0.38072431],\n", + " [ 0.56165069, 0.65201128],\n", + " [ 0.54719591, -0.18444657],\n", + " ...,\n", + " [ 0.44603789, 0.04932952],\n", + " [ 0.61214852, -0.05190253],\n", + " [ 0.38557649, 0.68061471]])" + ] + }, + "execution_count": 189, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cate_t" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### CATE w/ Confidence Intervals" + ] + }, + { + "cell_type": "code", + "execution_count": 190, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:causalml:Error metrics for treatment_a\n", + "INFO:causalml:RMSE (Control): 0.971329\n", + "INFO:causalml: MAE (Control): 0.769895\n", + "INFO:causalml:RMSE (Treatment): 0.934618\n", + "INFO:causalml: MAE (Treatment): 0.743548\n", + "INFO:causalml:Error metrics for treatment_b\n", + "INFO:causalml:RMSE (Control): 0.993107\n", + "INFO:causalml: MAE (Control): 0.789818\n", + "INFO:causalml:RMSE (Treatment): 0.796908\n", + "INFO:causalml: MAE (Treatment): 0.628517\n", + "INFO:causalml:10/100 bootstraps completed. (6s lapsed)\n", + "INFO:causalml:20/100 bootstraps completed. (12s lapsed)\n", + "INFO:causalml:30/100 bootstraps completed. (17s lapsed)\n", + "INFO:causalml:40/100 bootstraps completed. (23s lapsed)\n", + "INFO:causalml:50/100 bootstraps completed. (28s lapsed)\n", + "INFO:causalml:60/100 bootstraps completed. (34s lapsed)\n", + "INFO:causalml:70/100 bootstraps completed. (40s lapsed)\n", + "INFO:causalml:80/100 bootstraps completed. (45s lapsed)\n", + "INFO:causalml:90/100 bootstraps completed. 
(51s lapsed)\n" + ] + } + ], + "source": [ + "learner_t = BaseTRegressor(XGBRegressor(), control_name='control')\n", + "cate_t, cate_t_lb, cate_t_ub = learner_t.fit_predict(X=X, treatment=treatment, y=y, return_ci=True, n_bootstraps=100,\n", + " bootstrap_size=5000)" + ] + }, + { + "cell_type": "code", + "execution_count": 191, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[ 0.28441787, 0.38072431],\n", + " [ 0.56165069, 0.65201128],\n", + " [ 0.54719591, -0.18444657],\n", + " ...,\n", + " [ 0.44603789, 0.04932952],\n", + " [ 0.61214852, -0.05190253],\n", + " [ 0.38557649, 0.68061471]])" + ] + }, + "execution_count": 191, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cate_t" + ] + }, + { + "cell_type": "code", + "execution_count": 192, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[-0.04115869, -0.05224179],\n", + " [-0.16902418, -0.19706225],\n", + " [-0.00189665, -0.8080983 ],\n", + " ...,\n", + " [ 0.10415745, -0.23481247],\n", + " [ 0.15116205, -0.78067239],\n", + " [-0.46228578, -0.81511819]])" + ] + }, + "execution_count": 192, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cate_t_lb" + ] + }, + { + "cell_type": "code", + "execution_count": 193, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[0.74080166, 1.06598807],\n", + " [0.79825233, 0.8707759 ],\n", + " [1.34376909, 0.64097773],\n", + " ...,\n", + " [0.75215713, 0.51593655],\n", + " [1.03536093, 0.43499326],\n", + " [1.04148687, 1.6343584 ]])" + ] + }, + "execution_count": 193, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cate_t_ub" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## X-Learner" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### ATE w/ Confidence Intervals" + ] + }, + { + "cell_type": "code", + "execution_count": 194, + "metadata": {}, + "outputs": [], + "source": [ + "learner_x = BaseXRegressor(XGBRegressor(), control_name='control')\n", + "ate_x, ate_x_lb, ate_x_ub = learner_x.estimate_ate(X=X, p=e, treatment=treatment, y=y)" + ] + }, + { + "cell_type": "code", + "execution_count": 195, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[0.31400111, 0.19998552],\n", + " [0.35198314, 0.25217314],\n", + " [0.38996516, 0.30436076]])" + ] + }, + "execution_count": 195, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "np.vstack((ate_x_lb, ate_x, ate_x_ub))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### CATE" + ] + }, + { + "cell_type": "code", + "execution_count": 196, + "metadata": {}, + "outputs": [], + "source": [ + "learner_x = BaseXRegressor(XGBRegressor(), control_name='control')\n", + "cate_x = learner_x.fit_predict(X=X, p=e, treatment=treatment, y=y)" + ] + }, + { + "cell_type": "code", + "execution_count": 197, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[ 0.26990545, 0.26035661],\n", + " [ 0.26034176, 0.46985847],\n", + " [ 0.37140876, -0.04080141],\n", + " ...,\n", + " [ 0.43502373, 0.22133699],\n", + " [ 0.42731443, -0.0570209 ],\n", + " [ 0.46857533, 0.35744303]])" + ] + }, + "execution_count": 197, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cate_x" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### CATE w/ Confidence Intervals" + ] + }, + { + "cell_type": "code", + "execution_count": 
198, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:causalml:10/100 bootstraps completed. (10s lapsed)\n", + "INFO:causalml:20/100 bootstraps completed. (20s lapsed)\n", + "INFO:causalml:30/100 bootstraps completed. (31s lapsed)\n", + "INFO:causalml:40/100 bootstraps completed. (41s lapsed)\n", + "INFO:causalml:50/100 bootstraps completed. (51s lapsed)\n", + "INFO:causalml:60/100 bootstraps completed. (61s lapsed)\n", + "INFO:causalml:70/100 bootstraps completed. (71s lapsed)\n", + "INFO:causalml:80/100 bootstraps completed. (81s lapsed)\n", + "INFO:causalml:90/100 bootstraps completed. (91s lapsed)\n" + ] + } + ], + "source": [ + "learner_x = BaseXRegressor(XGBRegressor(), control_name='control')\n", + "cate_x, cate_x_lb, cate_x_ub = learner_x.fit_predict(X=X, p=e, treatment=treatment, y=y, return_ci=True,\n", + " n_bootstraps=100, bootstrap_size=5000)" + ] + }, + { + "cell_type": "code", + "execution_count": 199, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'treatment_a': 0, 'treatment_b': 1}" + ] + }, + "execution_count": 199, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "learner_x._classes" + ] + }, + { + "cell_type": "code", + "execution_count": 200, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[ 0.26990545, 0.26035661],\n", + " [ 0.26034176, 0.46985847],\n", + " [ 0.37140876, -0.04080141],\n", + " ...,\n", + " [ 0.43502373, 0.22133699],\n", + " [ 0.42731443, -0.0570209 ],\n", + " [ 0.46857533, 0.35744303]])" + ] + }, + "execution_count": 200, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cate_x" + ] + }, + { + "cell_type": "code", + "execution_count": 201, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[ 0.02105564, -0.00483451],\n", + " [ 0.05179861, 0.09180887],\n", + " [ 0.12221365, -0.44646286],\n", + " ...,\n", + " [ 0.25909399, -0.02073404],\n", + " [ 0.14467462, -0.32544759],\n", + " [-0.08245101, -0.32453551]])" + ] + }, + "execution_count": 201, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cate_x_lb" + ] + }, + { + "cell_type": "code", + "execution_count": 202, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[0.48926775, 0.61125675],\n", + " [0.60614869, 0.71524641],\n", + " [0.96047391, 0.43098129],\n", + " ...,\n", + " [0.67855977, 0.40955369],\n", + " [0.63019724, 0.34458099],\n", + " [0.98388902, 0.98262639]])" + ] + }, + "execution_count": 202, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cate_x_ub" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## R-Learner" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### ATE w/ Confidence Intervals" + ] + }, + { + "cell_type": "code", + "execution_count": 203, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:causalml:generating out-of-fold CV outcome estimates\n", + "INFO:causalml:training the treatment effect model for treatment_a with R-loss\n", + "INFO:causalml:training the treatment effect model for treatment_b with R-loss\n" + ] + } + ], + "source": [ + "learner_r = BaseRRegressor(XGBRegressor(), control_name='control')\n", + "ate_r, ate_r_lb, ate_r_ub = learner_r.estimate_ate(X=X, p=e, treatment=treatment, y=y)" + ] + }, + { + "cell_type": "code", + "execution_count": 204, + "metadata": {}, + "outputs": [ + { + "data": 
{ + "text/plain": [ + "array([[0.28862501, 0.10444676],\n", + " [0.2890347 , 0.10506716],\n", + " [0.28944439, 0.10568755]])" + ] + }, + "execution_count": 204, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "np.vstack((ate_r_lb, ate_r, ate_r_ub))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### CATE" + ] + }, + { + "cell_type": "code", + "execution_count": 205, + "metadata": {}, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:causalml:generating out-of-fold CV outcome estimates\n", + "INFO:causalml:training the treatment effect model for treatment_a with R-loss\n", + "INFO:causalml:training the treatment effect model for treatment_b with R-loss\n" + ] + } + ], + "source": [ + "learner_r = BaseRRegressor(XGBRegressor(), control_name='control')\n", + "cate_r = learner_r.fit_predict(X=X, p=e, treatment=treatment, y=y)" + ] + }, + { + "cell_type": "code", + "execution_count": 206, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[ 0.15946019, 0.0566867 ],\n", + " [ 0.26207751, 0.08914298],\n", + " [ 0.35971397, -0.11964369],\n", + " ...,\n", + " [ 0.28634119, -0.05715203],\n", + " [ 0.33082661, -0.08944744],\n", + " [ 0.22354212, 0.29110223]])" + ] + }, + "execution_count": 206, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cate_r" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### CATE w/ Confidence Intervals" + ] + }, + { + "cell_type": "code", + "execution_count": 207, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "INFO:causalml:generating out-of-fold CV outcome estimates\n", + "INFO:causalml:training the treatment effect model for treatment_a with R-loss\n", + "INFO:causalml:training the treatment effect model for treatment_b with R-loss\n", + "INFO:causalml:10/100 bootstraps completed. (8s lapsed)\n", + "INFO:causalml:20/100 bootstraps completed. (16s lapsed)\n", + "INFO:causalml:30/100 bootstraps completed. (25s lapsed)\n", + "INFO:causalml:40/100 bootstraps completed. (33s lapsed)\n", + "INFO:causalml:50/100 bootstraps completed. (42s lapsed)\n", + "INFO:causalml:60/100 bootstraps completed. (50s lapsed)\n", + "INFO:causalml:70/100 bootstraps completed. (58s lapsed)\n", + "INFO:causalml:80/100 bootstraps completed. (66s lapsed)\n", + "INFO:causalml:90/100 bootstraps completed. 
(74s lapsed)\n" + ] + } + ], + "source": [ + "learner_r = BaseRRegressor(XGBRegressor(), control_name='control')\n", + "cate_r, cate_r_lb, cate_r_ub = learner_r.fit_predict(X=X, p=e, treatment=treatment, y=y, return_ci=True,\n", + " n_bootstraps=100, bootstrap_size=3000)" + ] + }, + { + "cell_type": "code", + "execution_count": 208, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[ 0.09944364, 0.04709589],\n", + " [ 0.25123295, 0.08984098],\n", + " [ 0.36286271, -0.14600784],\n", + " ...,\n", + " [ 0.34693044, -0.04801428],\n", + " [ 0.32974464, -0.1315679 ],\n", + " [ 0.2060248 , 0.16616488]])" + ] + }, + "execution_count": 208, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cate_r" + ] + }, + { + "cell_type": "code", + "execution_count": 209, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[-0.34995239, -0.19428928],\n", + " [-0.23480286, -0.33930292],\n", + " [-0.08365641, -0.61383729],\n", + " ...,\n", + " [-0.0510494 , -0.20574036],\n", + " [-0.01827118, -0.37462993],\n", + " [-0.60967366, -0.25259556]])" + ] + }, + "execution_count": 209, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cate_r_lb" + ] + }, + { + "cell_type": "code", + "execution_count": 210, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "array([[0.59212104, 0.51029717],\n", + " [0.74375887, 0.48526926],\n", + " [1.17339422, 0.11729919],\n", + " ...,\n", + " [0.8433193 , 0.18996149],\n", + " [0.80591545, 0.37860441],\n", + " [1.47198277, 0.90086589]])" + ] + }, + "execution_count": 210, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "cate_r_ub" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Visualize" + ] + }, + { + "cell_type": "code", + "execution_count": 212, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'treatment_a': 0, 'treatment_b': 1}" + ] + }, + "execution_count": 212, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "groups" + ] + }, + { + "cell_type": "code", + "execution_count": 214, + "metadata": {}, + "outputs": [ + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAysAAAHwCAYAAACmMfCZAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAIABJREFUeJzs3Xl4FFXa9/HvnUgIyBLEhQwqOoiyiOAKsm8KERVHxkeYUcHtUXAUH1EBR8SViOIo7jI6AoqI4+AoKiIawhAVGX2HRQQMyCIIKqMJmwESzvtHVWJ300k60OmK5Pe5rr7oOnXqnLu6q0PdfepUm3MOERERERGRqiYp6ABERERERESiUbIiIiIiIiJVkpIVERERERGpkpSsiIiIiIhIlaRkRUREREREqiQlKyIiIiIiUiUpWRGRmJjZJDP7oJLaHmxmhaUtV0J/d5vZqspqv6LMrLWZLTSzAjNbG3Q8vzZmdr+ZrSht+QDa3WBmIw+0nXgws4fM7Hszc2Z2WdDxiIgkipIVkWrMT0Cc/9hjZlvMLMfMbjezQyOqDwMuqUDbhWY2OMbq04HGsbZdgRg6+ft2XMSq8UD7ePd3AB4CtgLNgTNLq2Rmh5jZjX5is83MtprZf8zsz2bWIEr9L82syMxaRZRvCHnfoz0K/Xovl7I+r5wYQ+tuN7NFFTgW4uFBoFOslctIxE8FnohbVPvJzDoCtwFXAenA63Fuv1K/HCilz27+8XF0Ivstj5llm9nzQcchIr9QsiIi8/FOgJoA3YGpwJ+A/2dmRxVXcs7lO+d+imfH5qnhnPvZOfddPNsui3Nuu3NuS6L6i0EzYJ5zbq1z7odoFcysBvAO8ADwGtADOAX4M17iNSiifhfgMOAF4H8jmjsV7z1PB872y/qGlIUmjnNDyosfJ8awT9f7dU8F5gAvmtnvStm3JDNLjqHNmMTr/XXO/eCc2xGPmA5QM2C3c+5t59xm51zB/jRiZikHEsSBbi8isl+cc3rooUc1fQCTgA+ilDcGfgReLK0u0AqYDeQBO4DlwOX+urWAC3345YOBQryk6D/AbiCjuDyk7eJ6vYBlQAHwKdA2sk5E3Ef7/XUDjouMAcj2690NrIrYdhDwpR/TBuB+4JCQ9dnA88BoYLP/+kwB6pTzGqcDr/qv089+O2f466LFeHcp7QwH9gJnl7K+QcTyy8AjQDs/1tRStjvB77d9lHUvA+9V8Jg6xG9vQET5GuAl//n9wApgILDSf6+b+ev+CCz23/M1eKNgtUPaqQVMBPL9/XoKb2RqRUid+0OX/bJzgRxgp/9eZAPH+3Uj34PL/G02ACND2qgP/BX4wY9vIdAzymv5e7zEciewGv9zEVLvOn//C4D/+rH8ppTX8+WI2Ar9cgNG+K/Rbr+fGyO23QDcAzzr9/NRlPZ7Rdn/5/11Of5rPRbYBGzwy2sA9+F9zn8GvgCuiWj3//z3cbu/7StAo4jXKfTxQegxB9zsx78deA7vuLoBWA/8BDwD1Ajpz/BGf1f6r+tXwCjCP8MbgLvwRst+Ar7DO76SS3mtHdAphmP+Mv9Y2ApsAd4GTqjI50YPPfQo/aGRFRHZh3NuI94Iy8VmVtrfiWl4J0AdgNbALXgnAOBdylSEd8JR/G18sSRgnF+/OfBZKe0n4Z2EDgXOwjtBfMfMasW4G98A/fznZ/kxXBytopn1Bf4GvAScjJcY3ACMiaj6e7zRim7AAOB8vBPGqMzMgH/i7ef5fhzfAXPM7HA/xnS8k6hx/vPxpTR3OZDlnPsk2koXMuplZof5sU5yzn3q9/k/pcWZID/jneQWOwZvxOdyvMR3k5ldAzwOPAy0xEtI++AlJMUewntfL8c79nbjjeKUysx6A7PwTijP9h9T8U6AH8QbqSoeYSzrMqtJQE+8hOpUv713zaxZRL1xwIt4I1+v440qNfVjaQc8iXeyfxLesfRKGeHfgHc87iJ81OsmvOPzAbzX7xFgvJkNitj+/4CNeKNv10Rp/194n9Miftn/W0LWD8RL0nrgfbGAv28X+O21xEv4HonS9y14fxv6A7/Fe83BS7D6+89P8/sMvcT0bLzX7hy8ROBKYCZwBtAbuMIvGxyyzX3+ftwOtPD3+wa8kcfI12M93mdxmL9N8RygG4BP8N6P4tfiU8pXEy8pPBUvKTbgbX80VEQOVNDZkh566BHcg1JGVvx11+N9s3hktLp432wPLqPtwsj1eCcXDugcpTxyZMUR/q11A7xvWa+Oto1fVjKy4i938pePi6h3NyEjK3gnqq9F1BmGd4Kd4i9nA4sj6jwDfFLGa9DT779lSFlNvG+a7wopWwvcWc57tRN4PMb39f+Az0OWRwI5pdQtb2Sl0H/dQx9vlNF32MgKXoJynV92jV92P97JceOIbTew7zf0PfBGlOoC9fBO2q+MqPMfyhhZwTsB/WdFPweEjKzgJRYOODdkveGNHkyMeC1ving9doYct5fgJfV1K/A5vQYoiCjbBIyNKHsC+Coi/tkxtD+YiM+SX56DN2JqIWXN/H08IaLuvcBnZfRxpr/dUf5yN3/56CjH3CbCR01m4yXcKSFl7wCv+s/r4H1We0W0dRWwJeL1mBFRZw7+iJ+/nI0/srS/D+AIf9/aHUg7euihh/fQyIqIlMb8f10p68cDz/sTUu82s9Mq0Pa/Y6xXMorgvJGD5XjfIsdbK7xvmEPNA1KBpiFliyPqfAscRelaAf91zn1ZXOCc24X3bW1F98PKr1LiWrwT8GIvA2dHTrSP0cdA24jH0Bi2m2Rm2/FOIh/GGwF4IWT9t84bwQPAzIpHDR73J+Vv97efibfvJ/iPFD+mUB+VE8tpwPsxxFyW4tdufnGBc875y5Gv66KQOoV4o4LFx8lsvG/215jZNDO71swaViQQf+SsEdGP2aZmVjOkbGFF2o7iM38/i53h/7so4n26HS+RKY6xh5m9b2bfmNk2vCQAvLlx5fnSObcnZHkzXvK5O6LsSP95a7zP6psRMT0FNIy4+cQiwpX3GS6XmZ1mZv80s7X+vq7xV8WyryJSjkOCDkBEqqxWeKMn/4220jl3n5lNxbtMpwdwh5k95Jy7s5x2i9x+ThCOsDdKWWVfdrE7YtmRuBuVrMS75KZM/sT6FsCjZvZoyKokvMuuhlWw353Ouf25zfMIvG+/twPfRZzwgjfPKVTx6/gn9j0JB++Suf1NVCuS6MVDqceJc26rmZ2ON+rXE+/So4fMrLtzLvJEOh4O9AYBpb1P7fHmhoTaC2Bmx+O995PwRjH/i3fiPhsv2SzPnohlV0pZcSzF//4O+DpKe1tDnsf1M2xmdfES4bl4I1SbgWS8eTy6IYFIHGhkRUT2YWaN8a7Ln+Gci5YUAOCc+9o597Rz7vd4E1eHhKzejfef9oEoub2wmaXhnYQXj1J8DySH3rEM7xv0UMUnJuXFsQzoElHWFW9UYHVFAo7SbkMzK0ky/G+92+GdzFTEy0APMzs72sqQb4//F+/SlsjRkFuAy80stYL97q/vnHOrnHf3qtJG50
J9i3f5z4n+dpGPXcAqvJPWDhHbRi5H+hxvLkFpYjlWl/n/di4u8OckdaaC76VzrtA5l+2cG403z+EHvLkhsW7/I95JcbRjtvi1qojdQJK/P+X53P/36CjvUXGicBbe5Y7DnHMfO+dW4o0ERfYJB/43AmAp3uWBvy3l2CmqQFsV/bvVEmgI3OG/pyv8ZRGJE42siEiKmTXC+/KiId43vqPwkoFR0TYwszp4k4j/gXfJQxreCMuXIdXWAN3NbBbebVcreitZh/eNc/HE/QeAbfwyGXmhv/ygmY3Fu1zrrog21uF923uemU0Hdjnn8qP0lQnMNO8HAGfgndzfDTwScelJRWX5cb5iZjfgjVSNxrtk5ZkKtjUBb3LxbDO7F++ymh/wErjrgblm9hLexPprnXNhJ9Bmtg5vP/8H7y5msSo+PiJFGy3Zb845Z2Z/Bp41s3zgLbz5Mi3x5okM8Ucl/gqMNbMfgFy85OwEvGSnNPfi3ZzhL3jf9u8COgLznXO5eMdqPz+p/B7YFnnC75xbaWZv+PFdhzf/4Qa8myf8Ptb9NLOL8W4uMB/vzlFn4l3+9mVZ20WRiXfsr8YbieqF91pE3qY6FmvwRp/ON7MFwM/Oue3RKjrnVpjZFOBvZnYbsABvzsgZwGHOuYfx7sRlwHAzexXv8xQ54roO7zPe18xex5uTs5X94B8X44BxfsL1Id4oaxugtXMu6t+xUqwBOvg3RMgH8vxL+UqzFi/BuckfyWyKd/c0EYkTjayISGe8b7TX450A/xHvbkWnudJ/+6QQb8L7C3jzSIonwP4hpM5w4HS8/8yj/nZIOfYCd+DdtvQzvG9m+zrndkLJt8sD8UZfluAlAbeHNuDHPwpvgvkm4M1oHTnn3sWbjDsI71vyR4Gn8e7ws9/8k/mL8G5T+w7eXJ1GwDkVTd78a/gz8PZzAN78hKV4J60Lgcn88lsr++ync24b3h2xKnoy2x3vtYt81K9gO+Vyzr2I9572w3vP/42XgG4MqXYb3mv5Ct7cn0Pxbs1bVruz8O7G1gHvtfoU7w5QxZcW/RVvkv4CvGO1tB8/vRLvRHga3tyHs4Dz/IQnVj/iHRPv453UZwL3OOcmV6AN8CbT34OXBCwDbgVu2492cN4d5p7EuyPe98Bj5Wxytd//XXif/w/w7s72td/ef/AuN7wBLwn7P7y7boX2uRHvTl134h1PMyoad0R7Y/COjevxPhc5eHdMW1vBph7GS1IW4x0LZf54rP835nK8z+aXeF/i/F8F+xSRMlgcvxgTERERERGJG42siIiIiIhIlaRkRURERCQKMxsdejvkiEde0PGJVAe6DExEREQkCv83bQ4rZbVzzh3I3QJFJAYHXbKSn59/cO2QiIiIiEg1Ub9+/bDbqOsyMBERERERqZKUrIiIiIiISJWkZEUOGrm5FfmpA5HKpeNRqhIdj1KV6HiUilCyIiIiIiIiVZKSFRERERERqZIOCToAEREREQmOc47t27ezd+/ehPSXmppKfn5+QvqSqicpKYk6depgZuVXRsmKiIiISLW2fft2atasSUpKSkL6q1mzJqmpqQnpS6qe3bt3s337durWrRtTfV0GJiIiIlKN7d27N2GJikhKSkqFRvGUrIiIiIiISJWky8BEREREBIC0FzdWSrt5VzaulHbl4KeRFRERERERqZI0siIiIiIiYeI1EhLLSM2PP/7IhRdeCMD3339PcnIyDRs2BCArK6tkPk3r1q3Jzs4uWVdVLV68mL/+9a88+eSTzJ8/n5SUFNq1axeXttetW8fChQu55JJL4tJeZfSzZcsWrrvuOv7xj3/EJRaNrIiIiIhIYA477DBycnLIycnhyiuvZOjQoSXLlTnxv7CwsFLa+Mtf/sJ1110HQE5ODgsXLoxb/+vXr+f111+v8HaJ7Ofwww/nqKOOYsGCBXGJRSMrIiIiIvKrtGPHDm6//XaWL1/Onj17GDlyJH379mXdunVcf/317NixA4CHH36Ydu3aMX/+fMaOHUv9+vXJzc1lxowZXHLJJbRv356FCxeSnp7OK6+8Qq1atVizZg233norW7ZsoXbt2kyYMIETTzyRIUOGkJqaypIlS2jXrh1jx44tiWfbtm0sW7aM1q1bs27dOl588UWSk5OZPn06Dz30EC+99FLYtn/+858rFP8999zDV199RadOnRg4cCBpaWm888477Ny5k9WrV3PjjTeye/dupk+fTs2aNfn73/9OgwYNytyXunXrsmjRIr777jvuvfde+vXrt08/N9xwwz6vfWkxAvTt25e///3vtG/f/oDfYyUrIiIiIvKr9Mgjj9ClSxeeeuop8vLy6NmzJ926deOII47gjTfeIDU1ldWrV3P11VeTnZ0NeJdpffzxxxx33HGsW7eO1atX8/zzz/P4448zePBg3nrrLS699FKGDRvGo48+StOmTfnss88YPnw4M2fOBODbb7/l/fffJzk5OSye//znP7Ro0QKAJk2acOWVV1KnTh1uvPFGAF566aWwbe+9994KxT9mzBiefPJJpk+fDsDUqVNZvnw5//rXv9i1axennXYad999N/Pnz2fUqFFMmzaNoUOHlrkv3333He+99x5fffUVAwcOpF+/fvv0E01Zr/Gpp57KAw88EJf3WMmKiIiIiPwqZWVlMWvWLJ544gkAdu3axYYNG2jUqBG33XYbX3zxBUlJSaxevbpkm9NOO43jjjuuZLlJkyaccsopALRt25b169ezfft2Fi5cyKBBg0rq7d69u+R5v3799klUwDvxP/zww8uMOXTb/Yk/UufOnalbty5169alXr169OnTB4CWLVuybNmycvelb9++JCUl0bx5c3744YcyYw+1Z8+eUmM84ogj2LRpU8xtlUXJioiIiIiEqaxbGMebc44pU6bQrFmzsPLMzEyOPPJIcnJy2Lt3L0cddVTJukMPPTSsbs2aNUueJycn8/PPP7N3717q169PTk5O1H4j2yiWmppKQUFBmTGHbrs/8UcKjd/MSpaTkpIoKioqd19Ct3fOlRl7qKeffrrUGAsKCqhVq1bMbZVFE+xFRERE5FepZ8+eTJw4seQke/HixQBs3bqVo446iqSkJF599VWKiooq1G69evVo0qQJ//znPwHvJH7p0qXlbnfSSSexZs2akuU6deqwbdu2uMVft27dMtuL177E0k9Zr/Hq1atLLoc7UBpZERERERGg6v94Y8eOHUlK8r5rv+iiixg9ejSjRo2iY8eO7N27lyZNmjB9+nSuueYaLr/8cl599VV69epV6khIWSZOnMjw4cN5+OGHKSws5OKLL6Z169ZlbnPiiSeydetWtm3bRt26dcnIyOCKK67g3Xff5aGHHtqn/m233Vah+Fu1akVycjIdO3bkD3/4A2lpaZWyL5H9RJtgX9ZrPH/+fM4999yYYiuPVWS459cgPz//4NohiVlubu4+w6giQdHxKFWJjkcpS35+PvXr109YfwUFBaSmpiasv0R76qmnqFu3LldccUXQoQQmIyODadOmlZpMlXXM1a9f30KXdRmYiIiIiEicX
H311ZX6+zBV3ZYtW7jhhhtiHvUpjy4DExGRMMOGDQNgwoQJAUciIvLrk5qayoABA4IOI24+/PBDxowZE1bWpEkTpk6dGrX+4Ycfzvnnnx+3/pWsiIhIGCUpIiJSrGfPnvTs2TOw/nUZmIiIiIiIVElKVkREJMymTZvi9mNeIiIiB0KXgYmISJjie+Pn5eUFHImIJFqdQd0qpd3tk7MrpV05+ClZERGRMI0aNQo6BBEREUDJioiIRFixYkXQIYhIwOI1EhLLSM2GDRs477zzmDdvHg0aNCAvL48uXbowc+ZMmjRpEla3cePGbNy4MS6xVaa3336bZcuWMWLECN5++21OOOEEmjdvHpe2lyxZwubNm+P2o4uV0c+yZct48skneeaZZw44Ds1ZEREREZHAHH300Vx99dXcfffdANx9990MHjx4n0SlMhQWFlZKG48//jjXXHMNAO+88w4rV66MW/9Lly5lzpw5Fd4ukf20atWKb7/9lm+++eaA41CyIiIiIiKBGjp0KP/+9795+umnWbBgATfeeGPM227ZsoXLL7+c7t270717dxYsWADA559/zjnnnEPnzp0599xzyc3NBWDq1KkMGDCACy64gAsvvJD58+fTt29frrjiCs4880yuvfZanHMALFq0iPPOO4+uXbty8cUXs3nzZgD69u3LyJEj6dat2z6jB6tWrSIlJYWGDRvy6aefMmvWLEaPHk2nTp1Ys2bNPttWJP7du3eTmZnJjBkz6NSpEzNmzCAzM5Prr7+ejIwMTj75ZN566y3uuusuOnToQP/+/dmzZ0+5+zJmzBh69OjB6aefzscffxy1n2hKe40B+vTpU+p2FaHLwEREJEzXrl0BmDdvXsCRiEh1UaNGDe677z769+/PG2+8QY0aNWLeduTIkQwdOpSzzz6bb775hv79+7Nw4UKaNWvGrFmzOOSQQ8jOzubee+/lpZdeArxLnD766CMaNGjA/PnzWbp0KZ988gnp6en07t2bBQsWcMYZZ3D77bfzyiuvcPjhhzNjxgzuu+8+nnrqKQD27NlDdnb2PvEsWLCANm3aANCuXTsyMjLo06cP/fr1K6kTuu0111xTofhHjRrFokWLePjhhwHIzMxk7dq1zJw5kxUrVnDuuecyZcoU7r33Xv74xz8ye/ZsevfuXea+FBYWkpWVxfvvv8+4ceN488039+knmrJe41NPPZVHH3205IeG95eSFRERCbN48eKgQxCRamjOnDk0atSIL7/8ku7du8e8XXZ2dthcu23btrF9+3a2bt3KkCFD+PrrrzGzkhEGgG7dutGgQYOS5dNOO43GjRsD0Lp1a9avX0/9+vVZvnw5F110EQB79+7lqKOOKtnmd7/7XdR4vvvuOw4//PAyYw7ddn/ij9SrVy9q1KhBq1atKCoqolevXgC0bNmS9evXk5ubW+a+XHDBBQC0bduW9evXlxl7qLJiPOKII0pGbw6EkhUREQkT7ZtCEaleKusWxqVZsmQJ2dnZzJkzh4yMDPr37x/znQn37t3LBx98QGpqalj5bbfdRufOnZk6dSrr1q3j/PPPL1l36KGHhtWtWbNmyfPk5GQKCwtxztG8efNS521EtlEsNTWVrVu3lhlz6Lb7E3+k4viTkpKoUaMGZlayXFRUVO6+FG9fvO+xeuCBB0qNsaCggFq1asXcVmk0Z0VERMK0bduWtm3bBh2GiFQTzjmGDx9OZmYmxxxzDDfeeCOjR4+OefsePXowceLEkuUlS5YA3rf+6enpALzyyisVjqtZs2Zs2bKFhQsXAt6lW8uXLy93u5NOOok1a9aULNepU4dt27bFLf7y2ovXvsTST1mv8apVq0p+t+tAaGRFRERERIBgfrxx8uTJHH300SWXfl1zzTVMnTqVnJwcOnXqFFZ3586dtGzZsmR56NChjBs3jltvvZUOHTpQVFREhw4dSuZKDBkyhPHjx9O7d+8Kx5WSksLkyZMZMWIEW7dupaioiCFDhpR7At6hQwfuvPNOnHOYGf3792fYsGE899xzTJkyZZ/6FY2/S5cuPPbYY3Tq1Ilbbrml0vYlsp+LL754nzplvcY5OTlxub2yFd/t4GCRn59/cO2QxCw3N5dmzZoFHYYI8Os+HjMzMwEYNWpUwJFIvPyaj0epfPn5+dSvXz9h/RUUFOxzydPBZsSIEWRkZNCtW7egQwnErl276Nu3L++99x6HHLLv2EhZx1z9+vUtdFmXgYmISJhx48Yxbty4oMMQEfnVGj58ODt37gw6jMBs2LCBMWPGRE1UKkqXgYmISJgRI0YEHYKIyK/akUceyXnnnRd0GHHz8ssv8+yzz4aVtW/fnvHjx0et37RpU5o2bRqXvpWsiIhIGF3+JSIioS677DIuu+yyQPrWZWAiIiIiIlIlaWRFRORXbkdWn7i2t2T1dgBOaVoHgEN7vBfX9kVERGKlZEVERMJk3PYFABtntA84EhFJtHh/+VFMX3rI/lKyIiJykIjXyUCbNl3j0o6IiMiBUrIiIiJh5s2bB1TeN6wiUvXF68uPWP+OHHbYYbRs2ZKioiKOPfZYnnvuOdLS0vap17hxYzZu3BiX2CrT22+/zbJlyxgxYgRvv/02J5xwAs2bN49L20uWLGHz5s1x+cHFyupn2bJlPPnkkzzzzDMHHEdCJtibWaqZLTSzxWa2zMzu8csnmdkaM1vkP9r65WZmj5vZKjNbYmanhbQ1yMxy/cegRMQvIiIiIpWnVq1a5OTk8Mknn9CgQQOef/75hPRbWFhYKW08/vjjXHPNNQC88847rFy5Mm79L126lDlz5lR4u0T206pVK7799lu++eabA44jUXcD2wX0cM61AdoCfcys+GLo25xzbf3HIr8sA2jmP/4XeAbAzA4DxgDtgLOAMWbWIEH7ICIiIiKV7KyzzmLTpk0x19+yZQuXX3453bt3p3v37ixYsACAzz//nHPOOYfOnTtz7rnnkpubC8DUqVMZMGAAF1xwARdeeCHz58+nb9++XHHFFZx55plce+21OOcAWLRoEeeddx5du3bl4osvZvPmzQD07duXkSNH0q1bt31GD1atWkVKSgoNGzbk008/ZdasWYwePZpOnTqxZs2afbatSPy7d+8mMzOTGTNm0KlTJ2bMmEFmZibXX389GRkZnHzyybz11lvcdddddOjQgf79+7Nnz55y92XMmDH06NGD008/nY8//jhqP9GU9hoD9OnTp9TtKiIhyYrzbPcXa/gPV8Ym/YAp/nYLgDQzSwd6A3Occz86534C5gC6TkFEJI6aN28et8sVREQqoqioiHnz5pGRkRHzNiNHjmTo0KHMnTuXKVOmcNNNNwHQrFkzZs2axfz587njjju49957S7ZZsmQJU6ZM4d133wW8UYTMzEw+/fRT1q5dy4IFC9izZw+33347U6ZMYd68eVx22WXcd999JW3s2bOH7OxsbrzxxrB4FixYQJs2bQBo164dGRkZ3HfffeTk5HD88cfvs21F4k9JSWHUqFFcfPHF5OTkcPHFFwOwdu1a3nrrLaZNm8Z1111H586d+fjj
j0lNTWX27Nnl7kthYSFZWVlkZmYybty4UvuJVNZrfOqpp/Lxxx/H/D6WJmFzVswsGfgcOAF4yjn3qZkNAR4ws7uAD4GRzrldQGMgdNxog19WWnlUodmdVA96z6UqSdTx+Js491f8bRscF9d2JVh6H6U0qamp1KxZM6ysoKAgrn1Ethe5/PPPP9OxY0c2bdrEiSeeyNlnnx01BufcPuVz585l+fLlJctbt27lv//9L3l5edx55518/fXXmBmFhYUUFBSwZ88eOnfuTK1atSgoKGD37t20bduWhg0bsnv3blq0aMHq1atJTU3lyy+/pF+/foCXSB111FEUFBSwd+9e+vbtGzXGjRs3kpaWVrKuqKiI3bt3lyxHbrs/8Rc/By/R6NatG0VFRTRt2pSioiI6depEQUEBJ554Il9//TVffPFFmfvSu3dvCgoKaNGiBevWrYvaTzSjnwGoAAAgAElEQVQ//PBD1BgB6tWrx6ZNm6Juv3XrVr7//vuS5WbNmpXaR8KSFedcEdDWzNKAN8zsZGAUsBlIASYCI4B7S2+lYsracTn45Obm6j2XKiORx+MO/yucePVX8p/m8ivj2q4ER38fpSz5+fmkpqYCsMMvK/r4orj2Udw+eIlK6DJ4c1Y++ugjdu7cSf/+/XnppZe4/vrr92nHzPbZ1jnHhx9+uE/5nXfeSdeuXZk2bRrr1q3j/PPPJzU1lRo1alCvXr2S+ikpKdSqVatkuWbNmpgZKSkptGjRIuq8jaSkJBo0aLBPnwB16tRh69atJeuSk5NJSUkpWY7cdn/iP+SQQ0rqH3LIIRx66KElyzVq1KBWrVol+xbLvtStW5fU1FRq165NUVFR1H6iGT9+fNQYi/erdu3aUbevV68exxxzTKnthsUXU604cs7lAXOBPs65Tf6lXruAF/HmoQBsBEL34Gi/rLRyERGJk/T0dNLT04MOQ0Sqodq1a/Pggw/y5JNPxjz5vEePHkycOLFkecmSJYD37X3x37JXXnmlwrE0a9aMLVu2sHDhQsC7dCt0BKQ0J510EmvWrClZrlOnDtu2bYtb/OW1F699iaWfsl7jVatW0aJFiwrFGU1CRlbM7Ahgj3Muz8xqAecA48ws3Tm3ycwMuAj4wt/kLeBPZvYq3mT6fL/ebGBsyKT6c/FGZ0RERETkAFWFH29s06YNrVq14vXXX2fAgAFh63bu3EnLli1LlocOHcq4ceO49dZb6dChA0VFRXTo0IFHH32UYcOGMWTIEMaPH0/v3r0rHEdKSgqTJ09mxIgRbN26laKiIoYMGVLuCXiHDh248847cc5hZvTv359hw4bx3HPPMWXKlH3qVzT+Ll268Nhjj9GpUyduueWWStuXyH6izVsp6zXOycmJy+2VrfhuB5XJzE4BJgPJeKM5rznn7jWzLOAIwIBFwPXOue1+8vIk3uT5ncCVzrnP/LauAu7wm37AOfdiaF/5+fmVv0NSJekyB6lKEnoZmP87BvE6yRg2bBgAY/utjGu7Ehz9fZSy5OfnU79+/YT1F+0ysIPNiBEjyMjIoFu3bkGHEohdu3bRt29f3nvvPQ45ZN+xkbKOufr161vockJGVpxzS4BTo5T3KKW+A24oZd3fgL/FNUARESkxefJkAMb2a19OTRERiWb48OF89tlnQYcRmA0bNjBmzJioiUpF6RfsRUQkzGOPPeY/ezXQOEREfq2OPPJIzjvvvKDDiJuXX36ZZ599Nqysffv2jB8/Pmr9pk2b0rRp07j0rWRFRETCDB48GIAdWUpWREQELrvsMi677LJA+k743cBERERERERioZEVEREJM2vWLAC61CynooiISCVTsiIiImEGDhwIwMYZmmAvIiLBUrIiIiJhfrlXfn6gcYiIiGjOioiIhJk+fTrTp08POgwRqUbGjx9P+/bt6dChA506dYp6298hQ4bw5ptvBhBd7PLy8nj++efj2ubTTz/Nzp0749pmkP1UlJIVEREREQnMwoULmT17NvPmzePjjz/mzTffpHHjxgnpu6io6IC2LywsDFvOz8/nhRdeiKlurJ555hl+/vnn/dq2KvZTUUpWRERERKREWloaaWlpYWWXXnopaWlpJTfgAJg0aRJpaWkMGzaspGzTpk2kpaXRvHnzmPvbvHkzhx12GDVrenf1aNiwIenp6TFv//jjj9O9e3c6dOjA2LFjS8r/8Ic/0LVrV9q3b8+kSZNKyhs3bsyf//xnOnbsyMKFC2ndujVjx46lS5cudOjQga+++gqAHTt2cMMNN9CjRw86d+7MO++8A8DUqVMZMGAAF1xwARdeeGFYLPfccw9r1qyhU6dOjB49mvnz55ORkcGAAQNo164d4I1e9+jRg06dOnHzzTeXJEy33HIL3bp1o3379iX78eyzz7J582YuuOACzj///JL4R48eTfv27enXrx+ff/45ffv2pU2bNrz77ruAl4SNHj265HV58cUXAZg/fz59+/bliiuu4Mwzz+Taa6/FORe1n2iixVjZlKyIiEiYaCcqIiKVpUePHmzcuJHTTz+d4cOHk5OTE/O2WVlZrF69mqysLHJycli8eDEfffQRAE899RTz5s1j7ty5PPfcc/z444+Al4ScccYZfPTRR5x99tmAlyD961//4qqrruKJJ54A4JFHHqFLly5kZWUxc+ZM7rrrLnbs2AHAkiVLmDJlSklyUGzMmDEcf/zx5OTkcN999wGwePFiHnzwQT7//HNWrlzJjBkzmD17Njk5OSQnJ/Paa68BMHr0aLKzs/noo4/46KOP+OKLL7j++utp1KgRM2fO5O233y6Jv0uXLixYsIA6depw//33889//pOXX365JIF46aWXqFevHnPnzmXu3LlMnjyZtWvXArB06VIyMzP59NNPWbt2LQsWLIjaTzTRYqxsmmAvIiIiIiXy8vL2KYs2j23w4MElPyJbLD09Per2ZalTp07JJWDz58/nqquuYsyYMfzxj38sd9usrCyysrLo3Lkz4J3Ir169mo4dO/Lss8+WnHhv3LiR1atXc9hhh5GcnLzPiMgFF1wAQNu2bZk5c2ZJ27NmzSpJXnbt2sWGDRsA6NatGw0aNIhp/0477TSOO+44AObNm8fixYvp3r07AAUFBRx++OEAvPHGG0yaNInCwkK+++47Vq5cycknn7xPeykpKfTq1QuAli1bUrNmTWrUqEGrVq1Yv359SezLli0rmeOzdetWvv76a2rUqMFpp51Wcpld69atWb9+fUnSVp5YY4wnJSsiIhKm+ERjR1afgCMRkeoiOTmZzp0707lzZ1q1asUrr7wSU7LinOOWW27hyiuvDCufP38+8+bNY86cOdSuXZu+fftSUFAAQGpqKsnJyWH1iy9BS05OLplb4pxjypQpNGvWLKzuZ599xqGHHhrzvoXWdc4xcOBAxowZE1Zn7dq1PPHEE8ydO5e0tDSGDBlSEm+kGjVqYGYAJCUllcSelJRUckmZc46HHnqInj177vO6FNeP3N/yVCTGeNJlYCIiIiISmNzcXFavXl2yvHTpUo499tiYtu3Zsycvv/wy27d
vB+Dbb7/lhx9+YOvWrdSvX5/atWvz1VdfRb27WCxtT5w4Eecc4F3OVZ66deuybdu2Utd37dqVN998kx9++AGAn376ifXr17Nt2zZq165NvXr1+P777/nggw9ibrO02F944QX27NkDwKpVq0ouYdvf2MuKsTJpZEVEREREArNjxw5uv/128vPzSU5O5re//S0TJkyIWvfmm29m1KhRgDfRfM6cOaxcuZJzzz0X8EYxJk6cSK9evXjxxRc566yzOOGEEzjjjDMqHNdtt93GqFGj6NixI3v37qVJkybl3tb9sMMOo3379px99tn06tWrJK5izZs358477+R3v/sde/fupUaNGowfP54zzzyTU045hTPPPJPGjRuXTMYHGDRoEL///e9p1KhRmfNJQl1xxRWsX7+erl274pyjYcOGTJ06tcxtyuundevWpcZYmaw4WzxY5OfnH1w7JDHLzc3dZ6hWJCiJPB6LL9c6tMd7cWnv0ksvBeBv1+XHtV0Jjv4+Slny8/OpX79+wvorKCggNTU1Yf1J1VPWMVe/fn0LXdbIioiIhJk9e7b35Lr2wQYiIiLVnpIVEREJM23aNP9Z9MswRETk4NazZ0927doVVvbcc8/RqlWrhMeiZEVERMJkZGQAsCNLyYqISHX04YcfBh1CCd0NTERERKQaS0pKYvfu3UGHIdXE7t27SUqKPQXRyIqIiISZNGkSAJfEdudQEfmVq1OnDtu3b+fnn39OSH9bt26lXr16CelLqp6kpCTq1KkTc30lKyIiEubmm28G4JIZmmAvUh2YGXXr1k1Yf99//z3HHHNMwvqTXzclKyIiEmbQoEH+s5WBxiEiIqJkRUREwhT/GFvx77eIiIgERRPsRURERESkStLIioiIhNm0aRMAmv4qIiJB08iKiIiEadGiBS1atAg6DBEREY2siIhIuEaNGgUdgoiICKCRFRERibBixQpWrFgRdBgiIiIaWRERqY7qDOpWbp0dg1Jjrrt9cvaBBSQiIhKFRlZERERERKRK0siKiEg1Fm1EpGvXrgC8W0adYrGMuoiIiOwvJSsiIhJm8eLF/rP2gcYhIiKiZEVERMJkZ2d7T34cGWgcIiIiSlZERCRM27ZtAdiRFXAgIiJS7WmCvYiIiIiIVEkaWRERkTCZmZkA3NQu4EBERKTaU7IiIiJhxo0bB8BNMzTBXkREgqVkRUREwowYMcJ/Ni/QOERERJSsiIhImFGjRgGwI0vJioiIBEsT7EVEREREpErSyIqIiIRZtGgRAM0CjkNERETJioiIhOnWrRsAGzXBXkREAqZkRUREwrRp0yboEERERAAlKyIiEmHePG9i/Y6sPgFHIiIi1Z0m2IuIiIiISJWkZEVERERERKqkhCQrZpZqZgvNbLGZLTOze/zy483sUzNbZWbTzSzFL6/pL6/y1x8X0tYov3ylmfVORPwiItVJ8+bNad68edBhiIiIJGxkZRfQwznXBmgL9DGz9sA44FHn3AnAT8DVfv2rgZ/88kf9ephZS2AA0AroAzxtZskJ2gcRkWph8+bNbN68OegwREREEpOsOM92f7GG/3BAD+B1v3wycJH/vJ+/jL++p5mZX/6qc26Xc24NsAo4KwG7ICJSbSxfvpzly5cHHYaIiEji7gbmj4B8DpwAPAWsBvKcc4V+lQ1AY/95Y+AbAOdcoZnlAw398gUhzYZus4/c3Nx47oL8Cug9l6okUcfjb/ajv1Nj2KZeDHViaUeqBr1HUpXoeJRQzZqV/jPECUtWnHNFQFszSwPeACr9guiydlwOPrm5uXrPpcpI5PG44xvv3/3pr6xtKtKuPntVm/4+SlWi41EqIuG/s+KcyzOzucDZQJqZHeKPrhwNbPSrbQSOATaY2SFAfeC/IeXFQrcREZE4GDZsGABj+wUciIiIVHuJuhvYEf6ICmZWCzgHWA7MBX7vVxsEvOk/f8tfxl+f5ZxzfvkA/25hxwPNgIWJ2AcRkepi8uTJTJ48ufyKIiIilSxRIyvpwGR/3koS8Jpz7m0z+xJ41czuB/4DvODXfwF4ycxWAT/i3QEM59wyM3sN+BIoBG7wLy8TEZE4eeyxx/xnrwYah4iISEKSFefcEn6Zhxla/jVR7ublnCsALimlrQeAB+Ido4iIeAYPHgzAjiwlKyIiEiz9gr2IiIiIiFRJCZ9gLyIiVdusWbMA6FIz4EBERKTaU7IiIiJhBg4cCMDGGe0DjkRERKo7JSsiIhKmd+/e/rP8QOMQERFRsiIiImGmT58OwI6sPgFHIiIi1Z0m2IuIiIiISJWkZEVERERERKokJSsiIhImLS2NtLS0oMMQERFRsiIiIiIiIlWTkhUREQmTl5dHXl5e0GGIiIgoWRERERERkapJyYqIiIiIiFRJ+p0VEREJc+mllwLwt+sCDkRERKo9JSsiIhJm9uzZ3pPr2gcbiIiIVHtKVkREJMy0adP8ZxMCjUNERETJioiIhMnIyABgR5aSFRERCZYm2IuIiIiISJWkkRUREQkzadIkAC45Ntg4RERElKyIiEiYm2++GYBLZmiCvYiIBEvJioiIhBk0aJD/bGWgcYiIiChZERGRMBMmeBPrd2T1iXmbOoO6xa3/7ZOz49aWiIj8ummCvYiIiIiIVEkaWRERkTCbNm0CoF4MdeM5ChLP0RkRETk4aGRFRETCtGjRghYtWgQdhoiIiEZWREQkXKNGjYIOQUREBNDIioiIRFixYgUrVqwIOgwRERElKyIiIiIiUjUpWRERERERkSpJyYqIiITp2rUrXbt2DToMERERTbAXEZFwixcv9p+1DzQOERERJSsiIhImOzvbe/LjyEDjEBERUbIiIiJh2rZtC8COrIADERGRak9zVkREREREpErSyIqIiITJzMwE4KZ2AQciIiLVnpIVEREJM27cOABumqEJ9iIiEiwlKyIiEmbEiBH+s3mBxiEiIqJkRUREwowaNQqAHVlKVkREJFiaYC8iIiIiIlWSRlZERCTMokWLAGgWcBwiIiJKVkREJEy3bt0A2KgJ9iIiEjAlKyIiEqZNmzZBhyAiIgIoWRERkQjz5nkT63dk9Qk4EhERqe40wV5ERERERKokJSsiIiIiIlIlKVkREZEwzZs3p3nz5kGHISIiojkrIiISbvPmzf6z44IMQ0RERCMrIiISbvny5SxfvjzoMERERBKTrJjZMWY218y+NLNlZjbML7/bzDaa2SL/cV7INqPMbJWZrTSz3iHlffyyVWY2MhHxi4hUJ+np6aSnpwcdhoiISMIuAysEhjvn/p+Z1QU+N7M5/rpHnXPjQyubWUtgANAK+A3wgZmd6K9+CjgH2AD828zecs59mZC9EBERERGRhElIsuKc2wRs8p9vM7PlQOMyNukHvOqc2wWsMbNVwFn+ulXOua8BzOxVv66SFRGROBk2bBgAY/sFHIiIiFR7CZ9gb2bHAacCnwIdgT+Z2RXAZ3ijLz/hJTILQjbbwC/JzTcR5e1K6ys3Nzduccuvg95zqUoSdTz+Zj/6O7WMbSZPngzA2H7tK9zugSgrJjlwel2lKt
HxKKGaNWtW6rqEJitmVgf4B3Czc26rmT0D3Ac4/99HgKvi1V9ZOy4Hn9zcXL3nUmUk8njc4X+Fsz/9Rdvmscce85+9ut/tHgh9juNPfx+lKtHxKBWRsGTFzGrgJSpTnXMzAJxz34Ws/yvwtr+4ETgmZPOj/TLKKBcRkTgYPHgwADuyXg02EBERqfYSdTcwA14Aljvn/hJSHnq7md8BX/jP3wIGmFlNMzseaAYsBP4NNDOz480sBW8S/luJ2AcREREREUmsRI2sdAQuB5aa2SK/7A5goJm1xbsMbC1wHYBzbpmZvYY3cb4QuME5VwRgZn8CZgPJwN+cc8sStA8iItXCrFmzAOhSM+BARESk2kvU3cByAIuy6t0ytnkAeCBK+btlbSciIgdm4MCBAGyc0T7gSEREpLpL+N3ARESkauvdu/h3ePMDjUNERETJioiIhJk+fToAO7L6BByJiIhUdwmZYC8iIiIiIlJRSlZERERERKRKUrIiIiJh0tLSSEtLCzoMERERJSsiIiIiIlI1KVkREZEweXl55OXlBR2GiIiIkhUREREREamalKyIiIiIiEiVpN9ZERGRMJdeeikAf7su4EBERKTaU7IiIiJhZs+e7T25rn2wgYiISLWnZEVERMJMmzbNfzYh6vq0FzdWSr+FldKqiIj8milZERGRMBkZGQDsyIqerIiIiCSKkhUREdkveVc2jks7lTVSIyIiv35KVkREJMykSZMAuOTYYOMQERHZr2TFzGoBe51zu+Icj4iIBOzmm28G4JIZmmAvIiLBiilZMbPxwGvOuYVm1hd4HXBmdqlzbmalRigiIgk1aNAg/9nKQOMQERGJdWTlj8Bd/vO7gMuAfOBRQMmKiMhBZMIEb2L9jqw+AUciIiLVXazJSm3n3E4zawj81jn3DwAza1J5oYmIiIiISHUWa7LylZn9ETgBmANgZocDP1dWYCIiEoxNmzYBUC/gOERERGJNVobi/TrYHuAqv6w38H5lBCUiIsFp0aIFABs1wV5ERAIWU7LinPs30CGibCowtTKCEhGR4DRq1CjoEERERABIirWimZ1jZi+Y2Ux/+Qwz61F5oYmISBBWrFjBihUrgg5DREQktmTFzG4EngFygS5+8c/A/ZUUl4iIiIiIVHOxzlm5GejpnFtrZiP8shXASZUTloiIVBWRtzDe2KS4/MDaPbTHewfWgIiIHPRivQysLvCN/9z5/9YAdsc9IhERCVTXrl3p2rVr0GGIiIjEPLLyL2Ak8EBI2U3A3LhHJCIigVq8eDEAh/bIi7o+7cWNAORd2Xi/2tePTYqISKxiTVZuBGaa2bVAXTNbCWwDzq+0yEREJBDZ2dlBhyAiIgLEfuviTWZ2JnAWcCzeJWELnXN7KzM4ERFJvLZt2wYdgoiICBD7yArOOQd86j9EREREREQqVanJipl9wy+T6UvlnDs2rhGJiEigMjMzARg1alTAkYiISHVX1sjKZQmLQkREqoxx48YBSlZERCR4pSYrzrl5iQxERESqhhEjRpRfSUREJAFimrNiZinAncBA4DfAt8CrwAPOuYLKC09ERBJNIyoiIlJVxDrB/hm8X6u/CVgHNAHuABoDV1VOaCIiIiIiUp3FmqxcBDR1zhX/QtiXZvYpsAolKyIiB5VFixYBuoWxiIgEL9ZkZTNQGwj9OeNawKa4RyQiIoHq1q0bAHl50X/BXkREJFFiTVZeAt4zsyeADcAxwA3AFDPrUVzJOZcV/xBFRCSR2rRpE3QIIiIiQOzJynX+v3dElF/vP8D7TZbfxiMoEREJzrx5uhmkiIhUDTElK8654ys7EBERERERkVBJQQcgIiIiIiISTUzJipm1MbMsM/vRzHb7jz1mtruyAxQRkcRq3rw5zZs3DzoMERGRmOesTAP+gfc7Kz9XXjgiIhK0zZs3Bx2CiIgIEHuy0gi4yznnKjMYEREJ3vLly4MOQUREBIh9zspk4A+VGYiIiFQN6enppKenBx2GiIhIzCMrDwKfmNkdwHehK5xzPaJvIiIiIiIisv9iTVZeB9YAb7Afc1bM7BhgCnAU3u+xTHTOTTCzw4DpwHHAWuB/nHM/mZkBE4DzgJ3AYOfc//PbGgTc6Td9v3NuckXjERGR0g0bNgyACRMmBByJiIhUd7EmK22Bhs65/b37VyEw3Dn3/8ysLvC5mc0BBgMfOuceNLORwEhgBJABNPMf7YBngHZ+cjMGOAMv6fnczN5yzv20n3GJiEiEyZO974CUrIiISNBiTVbmAy2BRfvTiXNuE7DJf77NzJYDjYF+QDe/2mQgGy9Z6QdM8Sf0LzCzNDNL9+vOcc79COAnPH3w7lYmIiJx8NhjjwUdgoiICBB7srIGeN/M3mDfOSt3VaRDMzsOOBX4FDjKT2QANuNdJgZeIvNNyGYb/LLSyqPKzc2tSGhyENB7LlVJoo7H3+xHf6eWsU3Hjh3Laa92hfsLtW+8tcPW63NcOfS6SlWi41FCNWvWrNR1sSYrtYF3gBTgmP0NxMzq4P1ey83Oua3e1BSPc86ZWVxvjVzWjsvBJzc3V++5VBmJPB53+F/h7E9/+xVjzsb935Yo8frtHVBMUib9fZSqRMejVERMyYpz7soD7cjMauAlKlOdczP84u/MLN05t8m/zOt7v3wj4UnR0X7ZRn65bKy4PPtAYxMRkV/MmjULgIyMjIAjERGR6i7WkRUA/MnxhwMlQyLOua9j2M6AF4Dlzrm/hKx6CxiEd2vkQcCbIeV/MrNX8SbY5/sJzWxgrJk18OudC4yqyD6IiEjZBg4cCEBeXl7AkYiISHUXU7JiZi2BqUAbvLtwmf8vQHIMTXQELgeWmlnxJP078JKU18zsamAd8D/+unfxblu8Cu/WxVcCOOd+NLP7gH/79e4tnmwvIiLx0bt376BDEBERAWIfWXkamAt0x5tsfxyQCXwcy8bOuRxCRmMi9IxS3wE3lNLW34C/xdKviIh40l4MnxdSWEo5AH3+Uvo6ERGRBIo1WWkDnOOc22Nm5pzLN7PbgC+AlysvPBERERERqa5iTVYKgBrAHmCLmR0L/AQ0rKzAREQk/vKu9O/2nh2xLCIiUgUlxVhvPr/MJ3kdmAXMA7IqIygREQlOWloaaWlpQYchIiIS862L/ydk8Q5gGVAHmFIZQYmIiIiIiFTo1sUAzrm9Zva2c+6nyghIRESCpVsWi4hIVVHmZWBmdoWZ9Q5ZPsPMNuDNW1lhZidVeoQiIiIiIlItlTdn5VZgc8jyRGAOcArwIfBwJcUlIiIiIiLVXHmXgR0DLAUws2OA1kAv/8cZR+L9aKOIiBxELr30UgCmT58ecCQiIlLdlZesFAIpeLcu7gCsCPnF+J1ArUqMTUREAjB79uygQxAREQHKT1bmAQ+Y2WTgRmBmyLrmhF8iJiIiB4Fp06YFHYKIiAhQfrIyDHgJ+F/gE2BcyLrLgfcqKS4REQlIRkZG0CGIiIgA5SQrzrmNQI9S1o2slIhERERERESI/RfsRUSkmpg0aRKTJk0KOgwREZGK/yikiIgc3G6++WYABg8eHGwgI
iJS7SlZERGRMIMGDQo6BBEREaCMZMXMHnbO3eY/7+Gcy0pcWCIiEpQJEyYEHYKIiAhQ9pyV/w15/s/KDkRERERERCRUWZeBLTaz14EvgZpmdm+0Ss65uyolMhERCcSmTZsASE9PDzgSERGp7spKVn6PN7rSBDDgmCh1XGUEJSIiwWnRogUAeXl5AUciIiLVXanJinPue+B+ADM7xDl3ZcKiEhGRwDRq1CjoEERERIAY7wbmnLvSzBoAFwCNgY3A2865HyszOBERSbwVK1YEHYKIiAgQ449CmtnZwGrgeuAU4DpglV8uIiIiIiISd7H+zspjwFDn3KvFBWZ2KfA4cGZlBCYiIiIiItVbTCMrwInAaxFlrwMnxDccEREJWteuXenatWvQYYiIiPz/9u4/Su66vvf48y1UKQRdqJWEgGB1S8DjTbj+QK1t1p9JUC/qVUP06G6Kp3qOtonHHyHeY6WKDWn9kXjqtbVCsl4hRFutaAm5CHfD5VRthG4smnCXWimJCSiwSKJoo+/7x/e7MDPZ3zu739md5+OcOTPz/X7nM6/5zmQz7/l8Pt/vuHtWBoCLgWtqlr2BYmiYJGkO2bNnT9URJEkCxl+srAW+HhF/AtwNnA10Aq+aplySpIr09fVVHUGSJGD8RwP7p4h4OvBK4HTga8D1Hg1MkuaeJUuWVB1BkiRg/D0rZOaDwBemMYskSZIkPWq8E+wlSW1iw4YNbJtPplUAABz0SURBVNiwoeoYkiRZrEiS6m3cuJGNGzdWHUOSpPEPA5MktYd169ZVHUGSJGACxUpEnJWZd09nGElS9davX191BEmSgIkNA/sXgPLwxZIkSZI0rUbtWYmI24DbKAqV48rFlwGfmt5YkqSq9Pf3Ax7CWJJUvbGGgb0eeDbwHODEiLgdeEJEvBi4PTMfmu6AkqSZ1dXVBcDg4GC1QSRJbW+sYuW4zPw74O8i4o+Ai4A7gT8Gzo+Io5nZOd0hJUkzZ/HixZU+f8eWA1NuY3D1wiYkkSRVbaxi5eqIeCrwfeAE4BTgkcx8HUBEnDrN+SRJM2zXrl1VR5AkCRijWMnMCyLieOBZwK3AXwEnR8RngNvLywPTnlKSNGc92gvS13B/EprRKyNJah1jHg0sM49m5r8Av8zMPwCOUPyX0gl41jBJkiRJ02IiJ4V8d3mdmbkd2D4NeSRJFVu0aBEA+/btqziJJKndjbtYycyt5c3fmZ4okqRWcOjQoaojSJIETKxnBYDMfHA6gkiSWsPevXurjiBJEjCJYkWSNDlHbl5edYRxWbBgQdURJEkCxjHBXpIkSZKqYM+KJM2wk15yQ9URRrVmzRoANm/eXHESSVK7s2dFklSnt7eX3t7eqmNIkmTPiiSp3qZNm6qOIEkSMEM9KxFxVUTcFxF31Cy7LCIORER/ebmwZt36iLgrIu6MiGU1y5eXy+6KiEtnIrsktZuenh56enqqjiFJ0owNA9sKDHcYnE9m5pLycj1ARJwHXAw8s3zM/4yI4yLiOODTwArgPGBVua0kSZKkOWhGhoFl5i0RcfY4N78IuDYzfwH8e0TcBTyvXHdXZv4AICKuLbf9fpPjSlJb27FjBwArVqyoOIkkqd1VPWflXRHxVuA7wHvKE04uBL5Vs83+chnAPQ3LLxit8YGBgSZG1Wzge65W0vh5PH2E5TPjxLrnPn+ULKtWrQJg9+7d05JkpP0wWqbxq3+deoz7RK3Ez6NqdXZ2jriuymLlM8BHgCyvPw78YTOfYLQXrrlnYGDA91wtY7jP45Hy55ZKPqe3Hhj2uYfLsmzZshHXNcNY+2FKzzvC62x3/n1UK/HzqImorFjJzHuHbkfE3wJfL+8eAM6s2fSMchmjLJckNcn27durjiBJElDheVYiYkHN3dcCQ0cKuw64OCKeEBFPAzqBfwZ2A50R8bSIeDzFJPzrZjKzJEmSpJkzIz0rEbEN6AKeHBH7gQ8BXRGxhGIY2A+BtwNk5vci4osUE+ePAu/MzF+V7bwL2AkcB1yVmd+bifySJEmSZt5MHQ1s1TCLrxxl+48CHx1m+fXA9U2MJklq0NHRAcDg4GDFSSRJ7a6yYWCSJEmSNJqqD10sSWox9qhIklqFPSuSJEmSWpLFiiRJkqSWZLEiSaqzcuVKVq5cWXUMSZKcsyJJqrdz586qI0iSBFisSJIabNu2reoIkiQBFiuSpAYrVqyoOoIkSYBzViRJkiS1KHtWJGmWmNfdNenHHh260Tf2tlu3bgWgp6dn0s8nSVIzWKxIkuqsXbsWsFiRJFXPYkWSZpnDvX0TfkzHlgMADK5eOOa23d3dE25fkqTpYLEiSaqzefPmqiNIkgQ4wV6SJElSi7JYkSTVOXjwIAcPHqw6hiRJDgOTJNU799xzARgcHKw4iSSp3VmsSJLqzJ8/v+oIkiQBFiuSpAb79u2rOoIkSYBzViRJkiS1KIsVSZIkSS3JYkWSVGfp0qUsXbq06hiSJDlnRZJUb8+ePVVHkCQJsFiRJDXo6+urOoIkSYDFiiSpwZIlS6qOIEkS4JwVSZIkSS3KnhVJakEdWw4cs+zoKOuaacOGDQCsX79+Wp9HkqSx2LMiSaqzceNGNm7cWHUMSZLsWZGkVja4euFjd/qGWTYN1q1bN63tS5I0XhYrkqQ6Dv+SJLUKh4FJkiRJakkWK5KkOv39/fT391cdQ5Ikh4FJkup1dXUBMDg4WG0QSVLbs1iRJNVZvHhx1REkSQIsViRJDXbt2lV1BEmSAOesSJIkSWpRFiuSJEmSWpLFiiSpzqJFi1i0aFHVMSRJcs6KJKneoUOHqo4gSRJgsSJJarB3796qI0iSBFisSJIaLFiwoOoIkiQBzlmRJEmS1KIsViRJddasWcOaNWuqjiFJksWKJKleb28vvb29VceQJMk5K5Kkeps2bao6giRJgMWKJKlBT09P1REkSQIsViRJFTly8/L6+90nFDcalk/MlVN4rCSp1czInJWIuCoi7ouIO2qWnRoRN0bEQHl9Srk8IuJTEXFXRHw3Iv5rzWO6y+0HIqJ7JrJLUrvZsWMHO3bsqDqGJEkz1rOyFfgr4PM1yy4FbsrMKyLi0vL+OmAF0FleLgA+A1wQEacCHwKeAyRwW0Rcl5kPztBrkKS2sGrVKgAGBwenpf2TXnLDsMvndXcBcLi3b8JtNvbSSJLmhhkpVjLzlog4u2HxRUBXebsX6KMoVi4CPp+ZCXwrIjoiYkG57Y2Z+QBARNwILAe2TXN8SWory5YtqzqCJElAtXNWTsvMg+XtQ8Bp5e2FwD012+0vl420fEQDAwPNSapZw/dcraTx83j6CMuHd+Ix254/ocdP3uWXXz4jz9NoKq/v9Ib7/i04lvtErcTPo2p1dnaOuK4lJthnZkZENrvd0V645p6BgQHfc7WM4T6PR8qfW8b1Ob31wIjbzvXP+WRe35F76u/P9X00Uf59VCvx86iJqPKkkPeWw7sor+8rlx8AzqzZ7oxy2UjLJUmSJM1BVRYr1wFDR/TqBr5as/yt5VHBng88VA4X2wm8IiJOKY8c9opymSSp
[... base64-encoded "image/png" payloads elided: these "display_data" outputs
 are the per-group "Distribution of CATE Predictions" histograms rendered by
 the notebook source cell below ...]
+      "text/plain": [
+       "[matplotlib Figure repr]"
+      ]
+     },
+     "metadata": {
+      "needs_background": "light"
+     },
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "groups = learner_r._classes\n",
+    "\n",
+    "alpha = 1\n",
+    "linewidth = 2\n",
+    "bins = 30\n",
+    "for group,idx in sorted(groups.items(), key=lambda x: x[1]):\n",
+    "    plt.figure(figsize=(12,8))\n",
+    "    plt.hist(cate_t[:,idx], alpha=alpha, bins=bins, label='T Learner ({})'.format(group),\n",
+    "             histtype='step', linewidth=linewidth)\n",
+    "    plt.hist(cate_x[:,idx], alpha=alpha, bins=bins, label='X Learner ({})'.format(group),\n",
+    "             histtype='step', linewidth=linewidth)\n",
+    "    plt.hist(cate_r[:,idx], alpha=alpha, bins=bins, label='R Learner ({})'.format(group),\n",
+    "             histtype='step', linewidth=linewidth)\n",
+    "    plt.vlines(cate_s[0,idx], 0, plt.axes().get_ylim()[1], label='S Learner ({})'.format(group),\n",
+    "               linestyles='dotted', linewidth=linewidth)\n",
+    "    plt.title('Distribution of CATE Predictions for {}'.format(group))\n",
+    "    plt.xlabel('Individual Treatment Effect (ITE/CATE)')\n",
+    "    plt.ylabel('# of Samples')\n",
+    "    _=plt.legend()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.6.8"
+  },
+  "toc": {
+   "base_numbering": 1,
+   "nav_menu": {
+    "height": "174px",
+    "width": "252px"
+   },
+   "number_sections": false,
+   "sideBar": true,
+   "skip_h1_title": false,
+   "title_cell": "Table of Contents",
+   "title_sidebar": "Contents",
+   "toc_cell": false,
+   "toc_position": {
+    "height": "calc(100% - 180px)",
+    "left": "10px",
+    "top": "150px",
+    "width": "203px"
+   },
+   "toc_section_display": "block",
+   "toc_window_display": true
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
diff --git a/requirements.txt b/requirements.txt
index c3fe5589..535242bf 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -9,3 +9,4 @@ statsmodels>=0.9.0
 seaborn
 Cython
 xgboost==0.82
+future
diff --git a/tests/const.py b/tests/const.py
index cef0241f..b4b9e52e 100644
--- a/tests/const.py
+++ b/tests/const.py
@@ -6,6 +6,6 @@
 SCORE_COL = 'score'
 GROUP_COL = 'group'
 
-CONTROL_NAME = 'c'
+CONTROL_NAME = 'control'
 TREATMENT_NAMES = [CONTROL_NAME, 'treatment1', 'treatment2', 'treatment3']
 CONVERSION = 'conversion'
diff --git a/tests/test_meta_learners.py b/tests/test_meta_learners.py
index 55010018..086f3c45 100644
--- a/tests/test_meta_learners.py
+++ b/tests/test_meta_learners.py
@@ -50,7 +50,7 @@ def test_BaseSLearner(generate_regression_data):
     learner = BaseSLearner(learner=LinearRegression())
 
     # check the accuracy of the ATE estimation
-    ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y)
+    ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y, return_ci=True)
     assert (ate_p >= lb) and (ate_p <= ub)
     assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD
 
@@ -61,7 +61,7 @@ def test_BaseSRegressor(generate_regression_data):
     learner = BaseSRegressor(learner=XGBRegressor())
 
     # check the accuracy of the ATE estimation
-    ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y)
+    ate_p, lb, ub = learner.estimate_ate(X=X, treatment=treatment, y=y, return_ci=True, n_bootstraps=10)
     assert (ate_p >= lb) and (ate_p <= ub)
     assert ape(tau.mean(), ate_p) < ERROR_THRESHOLD
 
@@ -229,7 +229,7 @@ def test_BaseSClassifier(generate_classification_data):
     cumgain = get_cumgain(auuc_metrics,
                           outcome_col=CONVERSION,
                           treatment_col='W',
-                          steps=20)
+                          steps=15)
 
     # Check if the cumulative gain when using the model's prediction is
     # higher than it would be under random targeting
@@ -264,7 +264,7 @@ def test_BaseTClassifier(generate_classification_data):
     cumgain = get_cumgain(auuc_metrics,
                           outcome_col=CONVERSION,
                           treatment_col='W',
-                          steps=20)
+                          steps=15)
 
     # Check if the cumulative gain when using the model's prediction is
     # higher than it would be under random targeting
@@ -287,9 +287,10 @@ def test_BaseXClassifier(generate_classification_data):
                                          test_size=0.2,
                                          random_state=RANDOM_SEED)
 
-    uplift_model = BaseXClassifier(learner=XGBRegressor(),
-                                   control_outcome_learner=XGBClassifier(),
-                                   treatment_outcome_learner=XGBClassifier())
+    uplift_model = BaseXClassifier(control_outcome_learner=XGBClassifier(),
+                                   control_effect_learner=XGBRegressor(),
+                                   treatment_outcome_learner=XGBClassifier(),
+                                   treatment_effect_learner=XGBRegressor())
     uplift_model.fit(X=df_train[x_names].values,
                      treatment=df_train['treatment_group_key'].values,
                      y=df_train[CONVERSION].values)
@@ -305,7 +306,7 @@
     cumgain = get_cumgain(auuc_metrics,
                           outcome_col=CONVERSION,
                           treatment_col='W',
-                          steps=20)
+                          steps=15)
 
     # Check if the cumulative gain when using the model's prediction is
     # higher than it would be under random targeting
@@ -328,8 +329,8 @@ def test_BaseRClassifier(generate_classification_data):
                                          test_size=0.2,
                                          random_state=RANDOM_SEED)
-    uplift_model = BaseRClassifier(learner=XGBRegressor(),
-                                   outcome_learner=XGBClassifier())
+    uplift_model = BaseRClassifier(outcome_learner=XGBClassifier(),
+                                   effect_learner=XGBRegressor())
     uplift_model.fit(X=df_train[x_names].values,
                      p=df_train['propensity_score'].values,
                      treatment=df_train['treatment_group_key'].values,
                      y=df_train[CONVERSION].values)
@@ -345,7 +346,7 @@
     cumgain = get_cumgain(auuc_metrics,
                           outcome_col=CONVERSION,
                           treatment_col='W',
-                          steps=20)
+                          steps=15)
 
     # Check if the cumulative gain when using the model's prediction is
     # higher than it would be under random targeting
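Note (not part of the patch): the renamed constructor arguments exercised above, a separate outcome_learner and effect_learner for BaseRClassifier, can be pieced into a minimal end-to-end sketch on synthetic data. The import path, the control_name keyword, and predict() returning one CATE column per treatment group (indexed via _classes, as in the notebook cell in this diff) are assumptions inferred from the surrounding code, not guarantees of the library API.

import numpy as np
from xgboost import XGBClassifier, XGBRegressor
from causalml.inference.meta import BaseRClassifier   # assumed import path

rng = np.random.RandomState(42)
n = 1000
X = rng.normal(size=(n, 5))
treatment = rng.choice(['control', 'treatment1'], size=n)     # labels follow tests/const.py
p = np.full(n, 0.5)                                           # known propensity of 'treatment1'
y = rng.binomial(1, 0.3 + 0.1 * (treatment == 'treatment1'))  # binary outcome with a small uplift

# Constructor arguments mirror the updated test above; control_name is an assumed keyword.
learner_r = BaseRClassifier(outcome_learner=XGBClassifier(),
                            effect_learner=XGBRegressor(),
                            control_name='control')
learner_r.fit(X=X, p=p, treatment=treatment, y=y)

cate_r = learner_r.predict(X)   # assumed shape: (n, number of treatment groups)
for group, idx in sorted(learner_r._classes.items(), key=lambda x: x[1]):
    print(group, cate_r[:, idx].mean())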