diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 4a3b5d56..81560217 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -57,6 +57,7 @@ jobs: wget ${UCI_DB}/statlog/german/german.data -P aif360/data/raw/german/ wget ${UCI_DB}/statlog/german/german.doc -P aif360/data/raw/german/ wget ${PROPUBLICA_GH}/compas-scores-two-years.csv -P aif360/data/raw/compas/ + (cd aif360/data/raw/meps;Rscript generate_data.R <<< y) - name: Lint with flake8 run: | diff --git a/aif360/sklearn/datasets/__init__.py b/aif360/sklearn/datasets/__init__.py index cd475d14..525b1431 100644 --- a/aif360/sklearn/datasets/__init__.py +++ b/aif360/sklearn/datasets/__init__.py @@ -8,7 +8,8 @@ processing steps, when placed before an ``aif360.sklearn`` step in a Pipeline, will cause errors. """ -from aif360.sklearn.datasets.utils import * -from aif360.sklearn.datasets.openml_datasets import * +from aif360.sklearn.datasets.utils import standardize_dataset, NumericConversionWarning +from aif360.sklearn.datasets.openml_datasets import fetch_adult, fetch_german, fetch_bank from aif360.sklearn.datasets.compas_dataset import fetch_compas -from aif360.sklearn.datasets.tempeh_datasets import * +from aif360.sklearn.datasets.meps_datasets import fetch_meps +from aif360.sklearn.datasets.tempeh_datasets import fetch_lawschool_gpa diff --git a/aif360/sklearn/datasets/compas_dataset.py b/aif360/sklearn/datasets/compas_dataset.py index c909692d..c1594391 100644 --- a/aif360/sklearn/datasets/compas_dataset.py +++ b/aif360/sklearn/datasets/compas_dataset.py @@ -8,13 +8,14 @@ # cache location DATA_HOME_DEFAULT = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'raw') -COMPAS_URL = 'https://raw.githubusercontent.com/propublica/compas-analysis/master/compas-scores-two-years.csv' +COMPAS_URL = 'https://raw.githubusercontent.com/propublica/compas-analysis/bafff5da3f2e45eca6c2d5055faad269defd135a/compas-scores-two-years.csv' +COMPAS_VIOLENT_URL = 'https://raw.githubusercontent.com/propublica/compas-analysis/bafff5da3f2e45eca6c2d5055faad269defd135a/compas-scores-two-years-violent.csv' -def fetch_compas(data_home=None, binary_race=False, +def fetch_compas(subset='all', *, data_home=None, cache=True, binary_race=False, usecols=['sex', 'age', 'age_cat', 'race', 'juv_fel_count', 'juv_misd_count', 'juv_other_count', 'priors_count', 'c_charge_degree', 'c_charge_desc'], - dropcols=[], numeric_only=False, dropna=True): + dropcols=None, numeric_only=False, dropna=True): """Load the COMPAS Recidivism Risk Scores dataset. Optionally binarizes 'race' to 'Caucasian' (privileged) or @@ -28,9 +29,14 @@ def fetch_compas(data_home=None, binary_race=False, 'Female and 0 for 'Male' -- opposite the convention of other datasets. Args: + subset ({'all' or 'violent'}): Use the violent recidivism or full + version of the dataset. Note: 'violent' is not a strict subset of + 'all' -- there are four samples in 'violent' which do not show up in + 'all'. data_home (string, optional): Specify another download and cache folder for the datasets. By default all AIF360 datasets are stored in 'aif360/sklearn/data/raw' subfolders. + cache (bool): Whether to cache downloaded datasets. binary_race (bool, optional): Filter only White and Black defendants. usecols (single label or list-like, optional): Feature column(s) to keep. All others are dropped. @@ -43,14 +49,20 @@ def fetch_compas(data_home=None, binary_race=False, namedtuple: Tuple containing X and y for the COMPAS dataset accessible by index or name. 
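# --- Illustrative usage sketch (editor's aside, not part of the diff) ---
# Exercises the new keyword-only arguments (`subset`, `cache`) added to
# fetch_compas in the hunk above; the loader returns an (X, y) namedtuple.
from aif360.sklearn.datasets import fetch_compas

X, y = fetch_compas(subset='all', binary_race=True)      # full dataset, cached on first call
X_v, y_v = fetch_compas(subset='violent', cache=False)   # violent-recidivism variant, no on-disk caching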
""" + if subset not in {'violent', 'all'}: + raise ValueError("subset must be either 'violent' or 'all'; cannot be " + f"{subset}") + + data_url = COMPAS_VIOLENT_URL if subset == 'violent' else COMPAS_URL cache_path = os.path.join(data_home or DATA_HOME_DEFAULT, - os.path.basename(COMPAS_URL)) - if os.path.isfile(cache_path): + os.path.basename(data_url)) + if cache and os.path.isfile(cache_path): df = pd.read_csv(cache_path, index_col='id') else: - df = pd.read_csv(COMPAS_URL, index_col='id') - os.makedirs(os.path.dirname(cache_path), exist_ok=True) - df.to_csv(cache_path) + df = pd.read_csv(data_url, index_col='id') + if cache: + os.makedirs(os.path.dirname(cache_path), exist_ok=True) + df.to_csv(cache_path) # Perform the same preprocessing as the original analysis: # https://github.com/propublica/compas-analysis/blob/master/Compas%20Analysis.ipynb @@ -58,11 +70,18 @@ def fetch_compas(data_home=None, binary_race=False, & (df.days_b_screening_arrest >= -30) & (df.is_recid != -1) & (df.c_charge_degree != 'O') - & (df.score_text != 'N/A')] + & (df['score_text' if subset == 'all' else 'v_score_text'] != 'N/A')] for col in ['sex', 'age_cat', 'race', 'c_charge_degree', 'c_charge_desc']: df[col] = df[col].astype('category') + # Misdemeanor < Felony + df.c_charge_degree = df.c_charge_degree.cat.reorder_categories( + ['M', 'F'], ordered=True) + # 'Less than 25' < '25 - 45' < 'Greater than 45' + df.age_cat = df.age_cat.cat.reorder_categories( + ['Less than 25', '25 - 45', 'Greater than 45'], ordered=True) + # 'Survived' < 'Recidivated' cats = ['Survived', 'Recidivated'] df.two_year_recid = df.two_year_recid.replace([0, 1], cats).astype('category') diff --git a/aif360/sklearn/datasets/meps_datasets.py b/aif360/sklearn/datasets/meps_datasets.py new file mode 100644 index 00000000..1f148bb7 --- /dev/null +++ b/aif360/sklearn/datasets/meps_datasets.py @@ -0,0 +1,132 @@ +from io import BytesIO +import os +from zipfile import ZipFile + +import pandas as pd +import requests + +from aif360.sklearn.datasets.utils import standardize_dataset + + +# cache location +DATA_HOME_DEFAULT = os.path.join(os.path.dirname(os.path.abspath(__file__)), + '..', 'data', 'raw') +MEPS_URL = "https://meps.ahrq.gov/mepsweb/data_files/pufs" +PROMPT = """ +By using this function you acknowledge the responsibility for reading and +abiding by any copyright/usage rules and restrictions as stated on the MEPS web +site (https://meps.ahrq.gov/data_stats/data_use.jsp). + +Continue [y/n]? > """ + +def fetch_meps(panel, *, accept_terms=None, data_home=None, cache=True, + usecols=['REGION', 'AGE', 'SEX', 'RACE', 'MARRY', 'FTSTU', + 'ACTDTY', 'HONRDC', 'RTHLTH', 'MNHLTH', 'HIBPDX', + 'CHDDX', 'ANGIDX', 'MIDX', 'OHRTDX', 'STRKDX', 'EMPHDX', + 'CHBRON', 'CHOLDX', 'CANCERDX', 'DIABDX', 'JTPAIN', + 'ARTHDX', 'ARTHTYPE', 'ASTHDX', 'ADHDADDX', 'PREGNT', + 'WLKLIM', 'ACTLIM', 'SOCLIM', 'COGLIM', 'DFHEAR42', + 'DFSEE42', 'ADSMOK42', 'PCS42', 'MCS42', 'K6SUM42', + 'PHQ242', 'EMPST', 'POVCAT', 'INSCOV'], + dropcols=None, numeric_only=False, dropna=True): + """Load the Medical Expenditure Panel Survey (MEPS) dataset. + + Note: + For descriptions of the dataset features, see the `data codebook + `_. + + Args: + panel ({19, 20, 21}): Panel number (only 19, 20, and 21 are currently + supported). + accept_terms (bool, optional): Bypass terms prompt. Note: by setting + this to ``True``, you acknowledge responsibility for reading and + accepting the MEPS usage terms. 
+ data_home (string, optional): Specify another download and cache folder + for the datasets. By default all AIF360 datasets are stored in + 'aif360/sklearn/data/raw' subfolders. + cache (bool): Whether to cache downloaded datasets. + usecols (single label or list-like, optional): Feature column(s) to + keep. All others are dropped. + dropcols (single label or list-like, optional): Feature column(s) to + drop. + numeric_only (bool): Drop all non-numeric feature columns. + dropna (bool): Drop rows with NAs. + + Returns: + namedtuple: Tuple containing X and y for the MEPS dataset accessible by + index or name. + """ + if panel not in {19, 20, 21}: + raise ValueError("only panels 19, 20, and 21 are currently supported.") + + fname = 'h192' if panel == 21 else 'h181' + cache_path = os.path.join(data_home or DATA_HOME_DEFAULT, fname + '.csv') + if cache and os.path.isfile(cache_path): + df = pd.read_csv(cache_path) + else: + # skip prompt if user chooses + accept = accept_terms or input(PROMPT) + if accept != 'y' and accept != True: + raise PermissionError("Terms not agreed.") + rawz = requests.get(os.path.join(MEPS_URL, fname + 'ssp.zip')).content + with ZipFile(BytesIO(rawz)) as zf: + with zf.open(fname + '.ssp') as ssp: + df = pd.read_sas(ssp, format='xport') + # TODO: does this cause any differences? + # reduce storage size + df = df.apply(pd.to_numeric, errors='ignore', downcast='integer') + if cache: + os.makedirs(os.path.dirname(cache_path), exist_ok=True) + df.to_csv(cache_path, index=None) + # restrict to correct panel + df = df[df['PANEL'] == panel] + # change all 15s to 16s if panel == 21 + yr = 16 if panel == 21 else 15 + + # non-Hispanic Whites are marked as WHITE; all others as NON-WHITE + df['RACEV2X'] = (df['HISPANX'] == 2) & (df['RACEV2X'] == 1) + + # rename all columns that are panel/round-specific + df = df.rename(columns={ + 'FTSTU53X': 'FTSTU', 'ACTDTY53': 'ACTDTY', 'HONRDC53': 'HONRDC', + 'RTHLTH53': 'RTHLTH', 'MNHLTH53': 'MNHLTH', 'CHBRON53': 'CHBRON', + 'JTPAIN53': 'JTPAIN', 'PREGNT53': 'PREGNT', 'WLKLIM53': 'WLKLIM', + 'ACTLIM53': 'ACTLIM', 'SOCLIM53': 'SOCLIM', 'COGLIM53': 'COGLIM', + 'EMPST53': 'EMPST', 'REGION53': 'REGION', 'MARRY53X': 'MARRY', + 'AGE53X': 'AGE', f'POVCAT{yr}': 'POVCAT', f'INSCOV{yr}': 'INSCOV', + f'PERWT{yr}F': 'PERWT', 'RACEV2X': 'RACE'}) + + df.loc[df.AGE < 0, 'AGE'] = None # set invalid ages to NaN + cat_cols = ['REGION', 'SEX', 'RACE', 'MARRY', 'FTSTU', 'ACTDTY', 'HONRDC', + 'RTHLTH', 'MNHLTH', 'HIBPDX', 'CHDDX', 'ANGIDX', 'MIDX', + 'OHRTDX', 'STRKDX', 'EMPHDX', 'CHBRON', 'CHOLDX', 'CANCERDX', + 'DIABDX', 'JTPAIN', 'ARTHDX', 'ARTHTYPE', 'ASTHDX', 'ADHDADDX', + 'PREGNT', 'WLKLIM', 'ACTLIM', 'SOCLIM', 'COGLIM', 'DFHEAR42', + 'DFSEE42', 'ADSMOK42', 'PHQ242', 'EMPST', 'POVCAT', 'INSCOV', + # NOTE: education tracking seems to have changed between panels. 'EDUYRDG' + # was used for panel 19, 'EDUCYR' and 'HIDEG' were used for panels 20 & 21. + # User may change usecols to include these manually. 
+ 'EDUCYR', 'HIDEG'] + if panel == 19: + cat_cols += ['EDUYRDG'] + + for col in cat_cols: + df[col] = df[col].astype('category') + thresh = 0 if col in ['REGION', 'MARRY', 'ASTHDX'] else -1 + na_cats = [c for c in df[col].cat.categories if c < thresh] + df[col] = df[col].cat.remove_categories(na_cats) # set NaN cols to NaN + + df['SEX'] = df['SEX'].cat.rename_categories({1: 'Male', 2: 'Female'}) + df['RACE'] = df['RACE'].cat.rename_categories({False: 'Non-White', True: 'White'}) + df['RACE'] = df['RACE'].cat.reorder_categories(['Non-White', 'White'], ordered=True) + + # Compute UTILIZATION, binarize it to 0 (< 10) and 1 (>= 10) + cols = [f'OBTOTV{yr}', f'OPTOTV{yr}', f'ERTOT{yr}', f'IPNGTD{yr}', f'HHTOTD{yr}'] + util = df[cols].sum(axis=1) + df['UTILIZATION'] = pd.cut(util, [min(util)-1, 10, max(util)+1], right=False, + labels=['< 10 Visits', '>= 10 Visits'])#['low', 'high']) + + return standardize_dataset(df, prot_attr='RACE', target='UTILIZATION', + sample_weight='PERWT', usecols=usecols, + dropcols=dropcols, numeric_only=numeric_only, + dropna=dropna) diff --git a/aif360/sklearn/datasets/openml_datasets.py b/aif360/sklearn/datasets/openml_datasets.py index 003e80f7..eb447865 100644 --- a/aif360/sklearn/datasets/openml_datasets.py +++ b/aif360/sklearn/datasets/openml_datasets.py @@ -10,30 +10,8 @@ DATA_HOME_DEFAULT = os.path.join(os.path.dirname(os.path.abspath(__file__)), '..', 'data', 'raw') -def to_dataframe(data): - """Format an OpenML dataset Bunch as a DataFrame with categorical features - if needed. - - Args: - data (Bunch): Dict-like object containing ``data``, ``feature_names`` - and, optionally, ``categories`` attributes. Note: ``data`` should - contain both X and y data. - - Returns: - pandas.DataFrame: A DataFrame containing all data, including target, - with categorical features converted to 'category' dtypes. - """ - def categorize(item): - return cats[int(item)] if not pd.isna(item) else item - - df = pd.DataFrame(data['data'], columns=data['feature_names']) - for col, cats in data['categories'].items(): - df[col] = df[col].apply(categorize).astype('category') - - return df - -def fetch_adult(subset='all', data_home=None, binary_race=True, usecols=[], - dropcols=[], numeric_only=False, dropna=True): +def fetch_adult(subset='all', *, data_home=None, cache=True, binary_race=True, + usecols=None, dropcols=None, numeric_only=False, dropna=True): """Load the Adult Census Income Dataset. Binarizes 'race' to 'White' (privileged) or 'Non-white' (unprivileged). The @@ -52,11 +30,13 @@ def fetch_adult(subset='all', data_home=None, binary_race=True, usecols=[], data_home (string, optional): Specify another download and cache folder for the datasets. By default all AIF360 datasets are stored in 'aif360/sklearn/data/raw' subfolders. - binary_race (bool, optional): Group all non-white races together. - usecols (single label or list-like, optional): Feature column(s) to - keep. All others are dropped. - dropcols (single label or list-like, optional): Feature column(s) to - drop. + cache (bool): Whether to cache downloaded datasets. + binary_race (bool, optional): Group all non-white races together. Only + the protected attribute is affected, not the feature column, unless + numeric_only is ``True``. + usecols (list-like, optional): Feature column(s) to keep. All others are + dropped. + dropcols (list-like, optional): Feature column(s) to drop. numeric_only (bool): Drop all non-numeric feature columns. dropna (bool): Drop rows with NAs. 
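# --- Illustrative usage sketch (editor's aside, not part of the diff) ---
# Calling the new fetch_meps loader defined above: `accept_terms=True` bypasses
# the interactive MEPS data-use prompt, and the panel must be 19, 20, or 21.
# The result is a WeightedDataset (X, y, sample_weight) since PERWT is used.
from aif360.sklearn.datasets import fetch_meps

X, y, sample_weight = fetch_meps(19, accept_terms=True)          # panel 19, cached locally
X21, y21, sw21 = fetch_meps(21, accept_terms=True, cache=False)  # panel 21, no caching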
@@ -79,30 +59,32 @@ def fetch_adult(subset='all', data_home=None, binary_race=True, usecols=[], if subset not in {'train', 'test', 'all'}: raise ValueError("subset must be either 'train', 'test', or 'all'; " "cannot be {}".format(subset)) - df = to_dataframe(fetch_openml(data_id=1590, target_column=None, - data_home=data_home or DATA_HOME_DEFAULT, - as_frame=False)) + df = fetch_openml(data_id=1590, data_home=data_home or DATA_HOME_DEFAULT, + cache=cache, as_frame=True).frame if subset == 'train': df = df.iloc[16281:] elif subset == 'test': df = df.iloc[:16281] df = df.rename(columns={'class': 'annual-income'}) # more descriptive name - df['annual-income'] = df['annual-income'].cat.as_ordered() # '<=50K' < '>50K' + df['annual-income'] = df['annual-income'].cat.reorder_categories( + ['<=50K', '>50K'], ordered=True) # binarize protected attributes - if binary_race: - df.race = df.race.cat.set_categories(['Non-white', 'White'], - ordered=True).fillna('Non-white') - df.sex = df.sex.cat.as_ordered() # 'Female' < 'Male' - - return standardize_dataset(df, prot_attr=['race', 'sex'], - target='annual-income', sample_weight='fnlwgt', - usecols=usecols, dropcols=dropcols, - numeric_only=numeric_only, dropna=dropna) - -def fetch_german(data_home=None, binary_age=True, usecols=[], dropcols=[], - numeric_only=False, dropna=True): + race = df.race.cat.set_categories(['Non-white', 'White'], ordered=True) + race = race.fillna('Non-white') if binary_race else 'race' + if numeric_only and binary_race: + df.race = race + race = 'race' + df.sex = df.sex.cat.reorder_categories(['Female', 'Male'], ordered=True) + + return standardize_dataset(df, prot_attr=[race, 'sex'], + target='annual-income', sample_weight='fnlwgt', + usecols=usecols, dropcols=dropcols, + numeric_only=numeric_only, dropna=dropna) + +def fetch_german(*, data_home=None, cache=True, binary_age=True, usecols=None, + dropcols=None, numeric_only=False, dropna=True): """Load the German Credit Dataset. Protected attributes are 'sex' ('male' is privileged and 'female' is @@ -119,12 +101,13 @@ def fetch_german(data_home=None, binary_age=True, usecols=[], dropcols=[], data_home (string, optional): Specify another download and cache folder for the datasets. By default all AIF360 datasets are stored in 'aif360/sklearn/data/raw' subfolders. + cache (bool): Whether to cache downloaded datasets. binary_age (bool, optional): If ``True``, split protected attribute, 'age', into 'aged' (privileged) and 'youth' (unprivileged). The 'age' feature remains continuous. - usecols (single label or list-like, optional): Column name(s) to keep. - All others are dropped. - dropcols (single label or list-like, optional): Column name(s) to drop. + usecols (list-like, optional): Column name(s) to keep. All others are + dropped. + dropcols (list-like, optional): Column name(s) to drop. numeric_only (bool): Drop all non-numeric feature columns. dropna (bool): Drop rows with NAs. @@ -158,12 +141,12 @@ def fetch_german(data_home=None, binary_age=True, usecols=[], dropcols=[], ... 
pos_label='good') 0.9483094846144106 """ - df = to_dataframe(fetch_openml(data_id=31, target_column=None, - data_home=data_home or DATA_HOME_DEFAULT, - as_frame=False)) + df = fetch_openml(data_id=31, data_home=data_home or DATA_HOME_DEFAULT, + cache=cache, as_frame=True).frame df = df.rename(columns={'class': 'credit-risk'}) # more descriptive name - df['credit-risk'] = df['credit-risk'].cat.as_ordered() # 'bad' < 'good' + df['credit-risk'] = df['credit-risk'].cat.reorder_categories( + ['bad', 'good'], ordered=True) # binarize protected attribute (but not corresponding feature) age = (pd.cut(df.age, [0, 25, 100], @@ -175,18 +158,18 @@ def fetch_german(data_home=None, binary_age=True, usecols=[], dropcols=[], personal_status = df.pop('personal_status').str.split(expand=True) personal_status.columns = ['sex', 'marital_status'] df = df.join(personal_status.astype('category')) - df.sex = df.sex.cat.as_ordered() # 'female' < 'male' + df.sex = df.sex.cat.reorder_categories(['female', 'male'], ordered=True) - # 'no' < 'yes' - df.foreign_worker = df.foreign_worker.astype('category').cat.as_ordered() + df.foreign_worker = df.foreign_worker.astype('category').cat.set_categories( + ['no', 'yes'], ordered=True) return standardize_dataset(df, prot_attr=['sex', age, 'foreign_worker'], target='credit-risk', usecols=usecols, dropcols=dropcols, numeric_only=numeric_only, dropna=dropna) -def fetch_bank(data_home=None, percent10=False, usecols=[], dropcols='duration', - numeric_only=False, dropna=False): +def fetch_bank(*, data_home=None, cache=True, percent10=False, usecols=None, + dropcols=['duration'], numeric_only=False, dropna=False): """Load the Bank Marketing Dataset. The protected attribute is 'age' (left as continuous). The outcome variable @@ -200,10 +183,11 @@ def fetch_bank(data_home=None, percent10=False, usecols=[], dropcols='duration', data_home (string, optional): Specify another download and cache folder for the datasets. By default all AIF360 datasets are stored in 'aif360/sklearn/data/raw' subfolders. + cache (bool): Whether to cache downloaded datasets. percent10 (bool, optional): Download the reduced version (10% of data). - usecols (single label or list-like, optional): Column name(s) to keep. - All others are dropped. - dropcols (single label or list-like, optional): Column name(s) to drop. + usecols (list-like, optional): Column name(s) to keep. All others are + dropped. + dropcols (list-like, optional): Column name(s) to drop. numeric_only (bool): Drop all non-numeric feature columns. dropna (bool): Drop rows with NAs. Note: this is False by default for this dataset. 
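# --- Illustrative sketch (editor's aside, not part of the diff) ---
# The age binarization used by fetch_german, following the
# pd.cut(df.age, [0, 25, 100], ...) call above; the cut labels are elided in the
# hunk, so the 'youth'/'aged' names here are inferred from the docstring.
import pandas as pd

ages = pd.Series([19, 23, 30, 47, 64], name='age')
age_group = pd.cut(ages, bins=[0, 25, 100], labels=['youth', 'aged'])
# The continuous 'age' feature is left untouched; only the protected-attribute
# index uses the binarized 'youth' < 'aged' grouping.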
@@ -229,20 +213,21 @@ def fetch_bank(data_home=None, percent10=False, usecols=[], dropcols='duration', (45211, 6) """ # TODO: this seems to be an old version - df = to_dataframe(fetch_openml(data_id=1558 if percent10 else 1461, - data_home=data_home or DATA_HOME_DEFAULT, - target_column=None, as_frame=False)) + df = fetch_openml(data_id=1558 if percent10 else 1461, data_home=data_home + or DATA_HOME_DEFAULT, cache=cache, as_frame=True).frame df.columns = ['age', 'job', 'marital', 'education', 'default', 'balance', 'housing', 'loan', 'contact', 'day', 'month', 'duration', 'campaign', 'pdays', 'previous', 'poutcome', 'deposit'] # remap target df.deposit = df.deposit.map({'1': 'no', '2': 'yes'}).astype('category') - df.deposit = df.deposit.cat.as_ordered() # 'no' < 'yes' + df.deposit = df.deposit.cat.set_categories(['no', 'yes'], ordered=True) + # replace 'unknown' marker with NaN - df.apply(lambda s: s.cat.remove_categories('unknown', inplace=True) - if hasattr(s, 'cat') and 'unknown' in s.cat.categories else s) - # 'primary' < 'secondary' < 'tertiary' - df.education = df.education.astype('category').cat.as_ordered() + for col in df.select_dtypes('category'): + if 'unknown' in df[col].cat.categories: + df[col] = df[col].cat.remove_categories('unknown') + df.education = df.education.astype('category').cat.reorder_categories( + ['primary', 'secondary', 'tertiary'], ordered=True) return standardize_dataset(df, prot_attr='age', target='deposit', usecols=usecols, dropcols=dropcols, diff --git a/aif360/sklearn/datasets/tempeh_datasets.py b/aif360/sklearn/datasets/tempeh_datasets.py index 5416be80..cc44e1a3 100644 --- a/aif360/sklearn/datasets/tempeh_datasets.py +++ b/aif360/sklearn/datasets/tempeh_datasets.py @@ -4,8 +4,8 @@ from aif360.sklearn.datasets.utils import standardize_dataset -def fetch_lawschool_gpa(subset="all", usecols=[], dropcols=[], - numeric_only=False, dropna=False): +def fetch_lawschool_gpa(subset="all", *, usecols=None, dropcols=None, + numeric_only=False, dropna=True): """Load the Law School GPA dataset Note: @@ -21,7 +21,7 @@ def fetch_lawschool_gpa(subset="all", usecols=[], dropcols=[], dropcols (single label or list-like, optional): Feature column(s) to drop. numeric_only (bool): Drop all non-numeric feature columns. - dropna (bool): Drop rows with NAs. + dropna (bool): Drop rows with NAs. 
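# --- Illustrative usage sketch (editor's aside, not part of the diff) ---
# fetch_bank now drops 'duration' through the dropcols=['duration'] default
# shown above; passing an empty list keeps it, and dropna can be enabled.
from aif360.sklearn.datasets import fetch_bank

X, y = fetch_bank()                                      # 'duration' excluded, rows with NAs kept
X_full, y_full = fetch_bank(dropcols=[], dropna=True)    # keep 'duration', drop NA rows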
FIXME: NAs already dropped by tempeh Returns: namedtuple: Tuple containing X, y, and sample_weights for the Law School @@ -46,6 +46,9 @@ def fetch_lawschool_gpa(subset="all", usecols=[], dropcols=[], else: df = pd.concat([all_train, all_test], axis=0) - return standardize_dataset(df, prot_attr=['race'], target='zfygpa', - usecols=usecols, dropcols=dropcols, - numeric_only=numeric_only, dropna=dropna) + df.race = df.race.astype('category').cat.set_categories( + ['black', 'white'], ordered=True) + + return standardize_dataset(df, prot_attr='race', target='zfygpa', + usecols=usecols, dropcols=dropcols, + numeric_only=numeric_only, dropna=dropna) diff --git a/aif360/sklearn/datasets/utils.py b/aif360/sklearn/datasets/utils.py index d0973c36..90d465c5 100644 --- a/aif360/sklearn/datasets/utils.py +++ b/aif360/sklearn/datasets/utils.py @@ -3,68 +3,41 @@ import numpy as np import pandas as pd -from pandas.core.dtypes.common import is_list_like +from pandas.api.types import is_list_like, is_numeric_dtype -class ColumnAlreadyDroppedWarning(UserWarning): - """Warning used if a column is attempted to be dropped twice.""" +Dataset = namedtuple('Dataset', ['X', 'y']) +WeightedDataset = namedtuple('WeightedDataset', ['X', 'y', 'sample_weight']) -def check_already_dropped(labels, dropped_cols, name, dropped_by='numeric_only', - warn=True): - """Check if columns have already been dropped and return only those that - haven't. - - Args: - labels (label, pandas.Series, or list-like of labels/Series): Column - labels to check. - dropped_cols (set or pandas.Index): Columns that were already dropped. - name (str): Original arg that triggered the check (e.g. dropcols). - dropped_by (str, optional): Original arg that caused dropped_cols`` - (e.g. numeric_only). - warn (bool, optional): If ``True``, produces a - :class:`ColumnAlreadyDroppedWarning` if there are columns in the - intersection of dropped_cols and labels. - - Returns: - list: Columns in labels which are not in dropped_cols. - """ - if isinstance(labels, pd.Series) or not is_list_like(labels): - labels = [labels] - str_labels = [c for c in labels if not isinstance(c, pd.Series)] - try: - already_dropped = dropped_cols.intersection(str_labels) - if isinstance(already_dropped, pd.MultiIndex): - raise TypeError # list of lists results in MultiIndex - except TypeError as e: - raise TypeError("Only labels or Series are allowed for {}. Got types:\n" - "{}".format(name, [type(c) for c in labels])) - if warn and any(already_dropped): - warnings.warn("Some column labels from `{}` were already dropped by " - "`{}`:\n{}".format(name, dropped_by, already_dropped.tolist()), - ColumnAlreadyDroppedWarning, stacklevel=2) - return [c for c in labels if isinstance(c, pd.Series) - or c not in already_dropped] +class NumericConversionWarning(UserWarning): + """Warning used if protected attribute or target is unable to be converted + automatically to a numeric type.""" def standardize_dataset(df, *, prot_attr, target, sample_weight=None, - usecols=[], dropcols=[], numeric_only=False, - dropna=True): + usecols=None, dropcols=None, numeric_only=False, dropna=True): """Separate data, targets, and possibly sample weights and populate protected attributes as sample properties. Args: - df (pandas.DataFrame): DataFrame with features and target together. - prot_attr (label, pandas.Series, or list-like of labels/Series): Single - label, Series, or list-like of labels/Series corresponding to - protected attribute columns. 
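# --- Illustrative sketch (editor's aside, not part of the diff) ---
# The loaders now return the module-level Dataset / WeightedDataset namedtuples
# defined above, so fields can be unpacked positionally or accessed by name.
from aif360.sklearn.datasets import fetch_adult

adult = fetch_adult()            # WeightedDataset: fetch_adult passes sample_weight='fnlwgt'
X, y, sample_weight = adult      # positional unpacking
assert adult.X is X and adult.y is y and adult.sample_weight is sample_weight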
Even if these are dropped from the - features, they remain in the index. If a Series is provided, it will - be added to the index but not show up in the features. - target (label, pandas.Series, or list-like of labels/Series): Column - label(s) or values of the target (outcome) variable. - sample_weight (single label, optional): Name of the column containing - sample weights. - usecols (single label or list-like, optional): Column(s) to keep. All - others are dropped. - dropcols (single label or list-like, optional): Column(s) to drop. + df (pandas.DataFrame): DataFrame with features and, optionally, target. + prot_attr (label or array-like or list of labels/arrays): Label, array + of the same length as `df`, or a list containing any combination of + the two corresponding to protected attribute columns. Even if these + are dropped from the features, they remain in the index. Column(s) + indicated by label will be copied from `df`, not dropped. Column(s) + passed explicitly as arrays will not be added to features. + target (label or array-like or list of labels/arrays): Label, array of + the same length as `df`, or a list containing any combination of the + two corresponding to the target (outcome) variable. Column(s) + indicated by label will be dropped from features. + sample_weight (single label or array-like, optional): Name of the column + containing sample weights or an array of sample weights of the same + length as `df`. If a label is passed, the column is dropped from + features. Note: the index of a passed Series will be ignored. + usecols (list-like, optional): Column(s) to keep. All others are + dropped. + dropcols (list-like, optional): Column(s) to drop. Missing labels are + ignored. numeric_only (bool): Drop all non-numeric, non-binary feature columns. dropna (bool): Drop rows with NAs. @@ -81,8 +54,8 @@ def standardize_dataset(df, *, prot_attr, target, sample_weight=None, * **sample_weight** (`pandas.Series`, optional) -- Sample weights. Note: - The order of execution for the dropping parameters is: numeric_only -> - usecols -> dropcols -> dropna. + The order of execution for the dropping parameters is: usecols -> + dropcols -> numeric_only -> dropna. 
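# --- Illustrative sketch (editor's aside, not part of the diff) ---
# Minimal example of the updated standardize_dataset semantics described above:
# a label prot_attr column is copied into the index (and kept as a feature), the
# target column is dropped from the features, and the column drops are applied
# in the order usecols -> dropcols -> numeric_only -> dropna. The toy DataFrame
# and column names here are hypothetical.
import pandas as pd
from aif360.sklearn.datasets import standardize_dataset

df = pd.DataFrame({'sex': ['M', 'F', 'F'], 'age': [25, 40, 31], 'y': [1, 0, 1]})
X, y = standardize_dataset(df, prot_attr='sex', target='y')
# X keeps the 'sex' column and carries 'sex' in its index; y is indexed the same way.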
Examples: >>> import pandas as pd @@ -101,45 +74,54 @@ def standardize_dataset(df, *, prot_attr, target, sample_weight=None, >>> X, y = standardize_dataset(df, prot_attr=0, target=5) >>> X_tr, X_te, y_tr, y_te = train_test_split(X, y) """ - orig_cols = df.columns if numeric_only: for col in df.select_dtypes('category'): if df[col].cat.ordered: df[col] = df[col].factorize(sort=True)[0] df[col] = df[col].replace(-1, np.nan) - df = df.select_dtypes(['number', 'bool']) - nonnumeric = orig_cols.difference(df.columns) - prot_attr = check_already_dropped(prot_attr, nonnumeric, 'prot_attr') - if len(prot_attr) == 0: - raise ValueError("At least one protected attribute must be present.") - df = df.set_index(prot_attr, drop=False, append=True) + # protected attribute(s) + df = df.set_index(prot_attr, drop=False) + pa = df.index - target = check_already_dropped(target, nonnumeric, 'target') - if len(target) == 0: - raise ValueError("At least one target must be present.") - y = pd.concat([df.pop(t) if not isinstance(t, pd.Series) else - t.set_axis(df.index, inplace=False) for t in target], axis=1) - y = y.squeeze() # maybe Series + # target(s) + df = df.set_index(target, drop=True) # utilize set_index logic for mixed types + y = df.index.to_frame().squeeze() + df.index = y.index = pa + + # sample weight + if sample_weight is not None: + sw = pd.Series(sample_weight) if is_list_like(sample_weight) else \ + df.pop(sample_weight) + sw.index = pa # Column-wise drops - orig_cols = df.columns if usecols: - usecols = check_already_dropped(usecols, nonnumeric, 'usecols') - df = df[usecols] - unused = orig_cols.difference(df.columns) - - dropcols = check_already_dropped(dropcols, nonnumeric, 'dropcols', warn=False) - dropcols = check_already_dropped(dropcols, unused, 'dropcols', 'usecols', False) - df = df.drop(columns=dropcols) + if not is_list_like(usecols): + usecols = [usecols] # ensure output is DataFrame, not Series + df = df.loc[:, usecols] + if dropcols: + df = df.drop(columns=dropcols, errors='ignore') + if numeric_only: + df = df.select_dtypes(['number', 'bool']) + # warn if nonnumeric prot_attr or target but proceed + if any(not is_numeric_dtype(dt) for dt in pa.to_frame().dtypes): + warnings.warn(f"index contains non-numeric:\n{pa.to_frame().dtypes}", + category=NumericConversionWarning) + if any(not is_numeric_dtype(dt) for dt in y.to_frame().dtypes): + warnings.warn(f"y contains non-numeric column:\n{y.to_frame().dtypes}", + category=NumericConversionWarning) # Index-wise drops if dropna: - notna = df.notna().all(axis=1) & y.notna() + notna = df.notna().all(axis=1) & y.notna() & pa.to_frame().notna().all(axis=1) + if sample_weight is not None: + notna &= sw.notna() + sw = sw.loc[notna] df = df.loc[notna] y = y.loc[notna] - if sample_weight is not None: - return namedtuple('WeightedDataset', ['X', 'y', 'sample_weight'])( - df, y, df.pop(sample_weight).rename('sample_weight')) - return namedtuple('Dataset', ['X', 'y'])(df, y) + for col in df.select_dtypes('category'): + df[col] = df[col].cat.remove_unused_categories() + + return Dataset(df, y) if sample_weight is None else WeightedDataset(df, y, sw) diff --git a/aif360/sklearn/inprocessing/grid_search_reduction.py b/aif360/sklearn/inprocessing/grid_search_reduction.py index 4af7762d..786635c6 100644 --- a/aif360/sklearn/inprocessing/grid_search_reduction.py +++ b/aif360/sklearn/inprocessing/grid_search_reduction.py @@ -148,9 +148,10 @@ def fit(self, X, y): if self.drop_prot_attr: X = X.drop(self.prot_attr, axis=1) - le = LabelEncoder() - y = 
le.fit_transform(y) - self.classes_ = le.classes_ + if isinstance(self.model_.constraints, red.ClassificationMoment): + le = LabelEncoder() + y = le.fit_transform(y) + self.classes_ = le.classes_ self.model_.fit(X, y, sensitive_features=A) diff --git a/aif360/sklearn/utils.py b/aif360/sklearn/utils.py index 604b1202..03e1cb43 100644 --- a/aif360/sklearn/utils.py +++ b/aif360/sklearn/utils.py @@ -50,10 +50,15 @@ def check_groups(arr, prot_attr, ensure_binary=False): provided protected attributes are in the index. Args: - arr (:class:`pandas.Series` or :class:`pandas.DataFrame`): A Pandas - object containing protected attribute information in the index. - prot_attr (single label or list-like): Protected attribute(s). If - ``None``, all protected attributes in arr are used. + arr (array-like): Either a Pandas object containing protected attribute + information in the index or array-like with explicit protected + attribute array(s) for `prot_attr`. + prot_attr (label or array-like or list of labels/arrays): Protected + attribute(s). If contains labels, arr must include these in its + index. If ``None``, all protected attributes in ``arr.index`` are + used. Can also be 1D array-like of the same length as arr or a + list of a combination of such arrays and labels in which case, arr + may not necessarily be a Pandas type. ensure_binary (bool): Raise an error if the resultant groups are not binary. @@ -62,32 +67,34 @@ def check_groups(arr, prot_attr, ensure_binary=False): * **groups** (:class:`pandas.Index`) -- Label (or tuple of labels) of protected attribute for each sample in arr. - * **prot_attr** (`list-like`) -- Modified input. If input is a + * **prot_attr** (`FrozenList`) -- Modified input. If input is a single label, returns single-item list. If input is ``None`` returns list of all protected attributes. """ - if not hasattr(arr, 'index'): - raise TypeError( - "Expected `Series` or `DataFrame`, got {} instead.".format( - type(arr).__name__)) - - all_prot_attrs = [name for name in arr.index.names if name] # not None or '' - if prot_attr is None: - prot_attr = all_prot_attrs - elif not is_list_like(prot_attr): - prot_attr = [prot_attr] - - if any(p not in arr.index.names for p in prot_attr): - raise ValueError("Some of the attributes provided are not present " - "in the dataset. Expected a subset of:\n{}\nGot:\n" - "{}".format(all_prot_attrs, prot_attr)) - - groups = arr.index.droplevel(list(set(arr.index.names) - set(prot_attr))) + arr_is_pandas = isinstance(arr, (pd.DataFrame, pd.Series)) + if prot_attr is None: # use all protected attributes provided in arr + if not arr_is_pandas: + raise TypeError("Expected `Series` or `DataFrame` for arr, got " + f"{type(arr).__name__} instead. Otherwise, pass " + "explicit prot_attr array(s).") + groups = arr.index + elif arr_is_pandas: + df = arr.index.to_frame() + groups = df.set_index(prot_attr).index # let pandas handle errors + else: # arr isn't pandas. might be okay if prot_attr is array-like + df = pd.DataFrame(index=[None]*len(arr)) # dummy to check lengths match + try: + groups = df.set_index(prot_attr).index + except KeyError as e: + raise TypeError("arr does not include protected attributes in the " + "index. 
Check if this got dropped or prot_attr is " + "formatted incorrectly.") from e + prot_attr = groups.names groups = groups.to_flat_index() n_unique = groups.nunique() if ensure_binary and n_unique != 2: - raise ValueError("Expected 2 protected attribute groups, got {}".format( - groups.unique() if n_unique > 5 else n_unique)) + raise ValueError("Expected 2 protected attribute groups, got " + f"{groups.unique() if n_unique > 5 else n_unique}") return groups, prot_attr diff --git a/docs/source/modules/datasets.rst b/docs/source/modules/datasets.rst index a36ebac7..5c11921a 100644 --- a/docs/source/modules/datasets.rst +++ b/docs/source/modules/datasets.rst @@ -38,3 +38,6 @@ Common datasets datasets.CompasDataset datasets.GermanDataset datasets.LawSchoolGPADataset + datasets.MEPSDataset19 + datasets.MEPSDataset20 + datasets.MEPSDataset21 diff --git a/docs/source/modules/sklearn.rst b/docs/source/modules/sklearn.rst index 2463be1b..6bd4f88c 100644 --- a/docs/source/modules/sklearn.rst +++ b/docs/source/modules/sklearn.rst @@ -28,15 +28,13 @@ Utils :toctree: generated/ :template: class.rst - datasets.ColumnAlreadyDroppedWarning + datasets.NumericConversionWarning .. autosummary:: :toctree: generated/ :template: base.rst - datasets.check_already_dropped datasets.standardize_dataset - datasets.to_dataframe Loaders ------- @@ -50,6 +48,7 @@ Loaders datasets.fetch_bank datasets.fetch_compas datasets.fetch_lawschool_gpa + datasets.fetch_meps :mod:`aif360.sklearn.metrics`: Fairness metrics =============================================== diff --git a/examples/sklearn/demo_exponentiated_gradient_reduction_sklearn.ipynb b/examples/sklearn/demo_exponentiated_gradient_reduction_sklearn.ipynb index ecf22c8c..6b4d05db 100644 --- a/examples/sklearn/demo_exponentiated_gradient_reduction_sklearn.ipynb +++ b/examples/sklearn/demo_exponentiated_gradient_reduction_sklearn.ipynb @@ -16,28 +16,12 @@ "cell_type": "code", "execution_count": 1, "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/Users/sohiniupadhyay/Desktop/AIF360/aif360/sklearn/inprocessing/grid_search_reduction.py:85: SyntaxWarning: \"is\" with a literal. Did you mean \"==\"?\n", - " if constraints is \"GroupLoss\":\n", - "/Users/sohiniupadhyay/Desktop/AIF360/aif360/sklearn/inprocessing/grid_search_reduction.py:94: SyntaxWarning: \"is\" with a literal. Did you mean \"==\"?\n", - " if loss is \"ZeroOne\":\n", - "/Users/sohiniupadhyay/Desktop/AIF360/aif360/sklearn/datasets/tempeh_datasets.py:38: SyntaxWarning: \"is\" with a literal. Did you mean \"==\"?\n", - " if subset is \"train\":\n", - "/Users/sohiniupadhyay/Desktop/AIF360/aif360/sklearn/datasets/tempeh_datasets.py:40: SyntaxWarning: \"is\" with a literal. 
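# --- Illustrative sketch (editor's aside, not part of the diff) ---
# With the updated check_groups above, `arr` no longer has to be a pandas object
# as long as the protected attribute is passed explicitly as an array of the
# same length. The arrays below are hypothetical.
import numpy as np
from aif360.sklearn.utils import check_groups

y_pred = np.array([1, 0, 1, 1])
sex = np.array(['M', 'F', 'F', 'M'])
groups, prot_attr = check_groups(y_pred, prot_attr=sex, ensure_binary=True)
# groups is a pandas Index with one group label per sample; prot_attr holds the
# resulting index name(s) (None here, since the array is unnamed).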
Did you mean \"==\"?\n", - " elif subset is \"test\":\n" - ] - } - ], + "outputs": [], "source": [ - "%matplotlib inline\n", - "import matplotlib.pyplot as plt\n", "import numpy as np\n", "import pandas as pd\n", "\n", + "from sklearn.compose import make_column_transformer\n", "from sklearn.linear_model import LogisticRegression\n", "from sklearn.metrics import accuracy_score\n", "from sklearn.model_selection import GridSearchCV, train_test_split\n", @@ -93,7 +77,6 @@ " \n", " \n", " \n", - " \n", " age\n", " workclass\n", " education\n", @@ -109,7 +92,6 @@ " native-country\n", " \n", " \n", - " \n", " race\n", " sex\n", " \n", @@ -129,7 +111,6 @@ " \n", " \n", " \n", - " 0\n", " Non-white\n", " Male\n", " 25.0\n", @@ -139,7 +120,7 @@ " Never-married\n", " Machine-op-inspct\n", " Own-child\n", - " Non-white\n", + " Black\n", " Male\n", " 0.0\n", " 0.0\n", @@ -147,8 +128,7 @@ " United-States\n", " \n", " \n", - " 1\n", - " White\n", + " White\n", " Male\n", " 38.0\n", " Private\n", @@ -165,8 +145,6 @@ " United-States\n", " \n", " \n", - " 2\n", - " White\n", " Male\n", " 28.0\n", " Local-gov\n", @@ -183,7 +161,6 @@ " United-States\n", " \n", " \n", - " 3\n", " Non-white\n", " Male\n", " 44.0\n", @@ -193,7 +170,7 @@ " Married-civ-spouse\n", " Machine-op-inspct\n", " Husband\n", - " Non-white\n", + " Black\n", " Male\n", " 7688.0\n", " 0.0\n", @@ -201,7 +178,6 @@ " United-States\n", " \n", " \n", - " 5\n", " White\n", " Male\n", " 34.0\n", @@ -223,37 +199,37 @@ "" ], "text/plain": [ - " age workclass education education-num \\\n", - " race sex \n", - "0 Non-white Male 25.0 Private 11th 7.0 \n", - "1 White Male 38.0 Private HS-grad 9.0 \n", - "2 White Male 28.0 Local-gov Assoc-acdm 12.0 \n", - "3 Non-white Male 44.0 Private Some-college 10.0 \n", - "5 White Male 34.0 Private 10th 6.0 \n", + " age workclass education education-num \\\n", + "race sex \n", + "Non-white Male 25.0 Private 11th 7.0 \n", + "White Male 38.0 Private HS-grad 9.0 \n", + " Male 28.0 Local-gov Assoc-acdm 12.0 \n", + "Non-white Male 44.0 Private Some-college 10.0 \n", + "White Male 34.0 Private 10th 6.0 \n", "\n", - " marital-status occupation relationship \\\n", - " race sex \n", - "0 Non-white Male Never-married Machine-op-inspct Own-child \n", - "1 White Male Married-civ-spouse Farming-fishing Husband \n", - "2 White Male Married-civ-spouse Protective-serv Husband \n", - "3 Non-white Male Married-civ-spouse Machine-op-inspct Husband \n", - "5 White Male Never-married Other-service Not-in-family \n", + " marital-status occupation relationship race \\\n", + "race sex \n", + "Non-white Male Never-married Machine-op-inspct Own-child Black \n", + "White Male Married-civ-spouse Farming-fishing Husband White \n", + " Male Married-civ-spouse Protective-serv Husband White \n", + "Non-white Male Married-civ-spouse Machine-op-inspct Husband Black \n", + "White Male Never-married Other-service Not-in-family White \n", "\n", - " race sex capital-gain capital-loss hours-per-week \\\n", - " race sex \n", - "0 Non-white Male Non-white Male 0.0 0.0 40.0 \n", - "1 White Male White Male 0.0 0.0 50.0 \n", - "2 White Male White Male 0.0 0.0 40.0 \n", - "3 Non-white Male Non-white Male 7688.0 0.0 40.0 \n", - "5 White Male White Male 0.0 0.0 30.0 \n", + " sex capital-gain capital-loss hours-per-week \\\n", + "race sex \n", + "Non-white Male Male 0.0 0.0 40.0 \n", + "White Male Male 0.0 0.0 50.0 \n", + " Male Male 0.0 0.0 40.0 \n", + "Non-white Male Male 7688.0 0.0 40.0 \n", + "White Male Male 0.0 0.0 30.0 \n", "\n", - " native-country \n", - " 
race sex \n", - "0 Non-white Male United-States \n", - "1 White Male United-States \n", - "2 White Male United-States \n", - "3 Non-white Male United-States \n", - "5 White Male United-States " + " native-country \n", + "race sex \n", + "Non-white Male United-States \n", + "White Male United-States \n", + " Male United-States \n", + "Non-white Male United-States \n", + "White Male United-States " ] }, "execution_count": 2, @@ -266,14 +242,20 @@ "X.head()" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To match the old version, we also remap the \"race\" feature to \"White\"/\"Non-white\"," + ] + }, { "cell_type": "code", "execution_count": 3, "metadata": {}, "outputs": [], "source": [ - "# there is one unused category ('Never-worked') that was dropped during dropna\n", - "X.workclass.cat.remove_unused_categories(inplace=True)" + "X.race = X.race.cat.set_categories(['Non-white', 'White'], ordered=True).fillna('Non-white')" ] }, { @@ -330,7 +312,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We use Pandas for one-hot encoding for easy reference to columns associated with protected attributes, information necessary for Exponentiated Gradient Reduction" + "We use sklearn for one-hot encoding for easy reference to columns associated with protected attributes, information necessary for Exponentiated Gradient Reduction" ] }, { @@ -360,31 +342,29 @@ " \n", " \n", " \n", - " \n", - " age\n", - " education-num\n", - " capital-gain\n", - " capital-loss\n", - " hours-per-week\n", " workclass_Federal-gov\n", " workclass_Local-gov\n", " workclass_Private\n", " workclass_Self-emp-inc\n", " workclass_Self-emp-not-inc\n", + " workclass_State-gov\n", + " workclass_Without-pay\n", + " education_10th\n", + " education_11th\n", + " education_12th\n", " ...\n", - " native-country_Portugal\n", - " native-country_Puerto-Rico\n", - " native-country_Scotland\n", - " native-country_South\n", - " native-country_Taiwan\n", " native-country_Thailand\n", " native-country_Trinadad&Tobago\n", " native-country_United-States\n", " native-country_Vietnam\n", " native-country_Yugoslavia\n", + " age\n", + " education-num\n", + " capital-gain\n", + " capital-loss\n", + " hours-per-week\n", " \n", " \n", - " \n", " race\n", " sex\n", " \n", @@ -412,134 +392,125 @@ " \n", " \n", " \n", - " 30149\n", - " 1\n", + " 1\n", " 1\n", + " 0.0\n", + " 0.0\n", + " 0.0\n", + " 0.0\n", + " 1.0\n", + " 0.0\n", + " 0.0\n", + " 0.0\n", + " 0.0\n", + " 0.0\n", + " ...\n", + " 0.0\n", + " 0.0\n", + " 1.0\n", + " 0.0\n", + " 0.0\n", " 58.0\n", " 11.0\n", " 0.0\n", " 0.0\n", " 42.0\n", - " 0\n", - " 0\n", - " 0\n", - " 0\n", - " 1\n", - " ...\n", - " 0\n", - " 0\n", - " 0\n", - " 0\n", - " 0\n", - " 0\n", - " 0\n", - " 1\n", - " 0\n", - " 0\n", " \n", " \n", - " 12028\n", - " 1\n", " 0\n", + " 0.0\n", + " 0.0\n", + " 0.0\n", + " 0.0\n", + " 1.0\n", + " 0.0\n", + " 0.0\n", + " 0.0\n", + " 0.0\n", + " 0.0\n", + " ...\n", + " 0.0\n", + " 0.0\n", + " 0.0\n", + " 0.0\n", + " 0.0\n", " 51.0\n", " 12.0\n", " 0.0\n", " 0.0\n", " 30.0\n", - " 0\n", - " 0\n", - " 0\n", - " 0\n", - " 1\n", - " ...\n", - " 0\n", - " 0\n", - " 0\n", - " 0\n", - " 0\n", - " 0\n", - " 0\n", - " 0\n", - " 0\n", - " 0\n", " \n", " \n", - " 36374\n", - " 1\n", " 1\n", + " 0.0\n", + " 0.0\n", + " 1.0\n", + " 0.0\n", + " 0.0\n", + " 0.0\n", + " 0.0\n", + " 0.0\n", + " 0.0\n", + " 0.0\n", + " ...\n", + " 0.0\n", + " 0.0\n", + " 1.0\n", + " 0.0\n", + " 0.0\n", " 26.0\n", " 14.0\n", " 0.0\n", " 1887.0\n", " 40.0\n", - " 0\n", - " 0\n", - " 1\n", - " 
0\n", - " 0\n", - " ...\n", - " 0\n", - " 0\n", - " 0\n", - " 0\n", - " 0\n", - " 0\n", - " 0\n", - " 1\n", - " 0\n", - " 0\n", " \n", " \n", - " 8055\n", - " 1\n", " 1\n", + " 0.0\n", + " 0.0\n", + " 1.0\n", + " 0.0\n", + " 0.0\n", + " 0.0\n", + " 0.0\n", + " 0.0\n", + " 0.0\n", + " 0.0\n", + " ...\n", + " 0.0\n", + " 0.0\n", + " 0.0\n", + " 0.0\n", + " 0.0\n", " 44.0\n", " 3.0\n", " 0.0\n", " 0.0\n", " 40.0\n", - " 0\n", - " 0\n", - " 1\n", - " 0\n", - " 0\n", - " ...\n", - " 0\n", - " 1\n", - " 0\n", - " 0\n", - " 0\n", - " 0\n", - " 0\n", - " 0\n", - " 0\n", - " 0\n", " \n", " \n", - " 38108\n", - " 1\n", " 1\n", + " 0.0\n", + " 0.0\n", + " 1.0\n", + " 0.0\n", + " 0.0\n", + " 0.0\n", + " 0.0\n", + " 1.0\n", + " 0.0\n", + " 0.0\n", + " ...\n", + " 0.0\n", + " 0.0\n", + " 1.0\n", + " 0.0\n", + " 0.0\n", " 33.0\n", " 6.0\n", " 0.0\n", " 0.0\n", " 40.0\n", - " 0\n", - " 0\n", - " 1\n", - " 0\n", - " 0\n", - " ...\n", - " 0\n", - " 0\n", - " 0\n", - " 0\n", - " 0\n", - " 0\n", - " 0\n", - " 1\n", - " 0\n", - " 0\n", " \n", " \n", "\n", @@ -547,77 +518,61 @@ "" ], "text/plain": [ - " age education-num capital-gain capital-loss \\\n", - " race sex \n", - "30149 1 1 58.0 11.0 0.0 0.0 \n", - "12028 1 0 51.0 12.0 0.0 0.0 \n", - "36374 1 1 26.0 14.0 0.0 1887.0 \n", - "8055 1 1 44.0 3.0 0.0 0.0 \n", - "38108 1 1 33.0 6.0 0.0 0.0 \n", - "\n", - " hours-per-week workclass_Federal-gov workclass_Local-gov \\\n", - " race sex \n", - "30149 1 1 42.0 0 0 \n", - "12028 1 0 30.0 0 0 \n", - "36374 1 1 40.0 0 0 \n", - "8055 1 1 40.0 0 0 \n", - "38108 1 1 40.0 0 0 \n", + " workclass_Federal-gov workclass_Local-gov workclass_Private \\\n", + "race sex \n", + "1 1 0.0 0.0 0.0 \n", + " 0 0.0 0.0 0.0 \n", + " 1 0.0 0.0 1.0 \n", + " 1 0.0 0.0 1.0 \n", + " 1 0.0 0.0 1.0 \n", "\n", - " workclass_Private workclass_Self-emp-inc \\\n", - " race sex \n", - "30149 1 1 0 0 \n", - "12028 1 0 0 0 \n", - "36374 1 1 1 0 \n", - "8055 1 1 1 0 \n", - "38108 1 1 1 0 \n", + " workclass_Self-emp-inc workclass_Self-emp-not-inc \\\n", + "race sex \n", + "1 1 0.0 1.0 \n", + " 0 0.0 1.0 \n", + " 1 0.0 0.0 \n", + " 1 0.0 0.0 \n", + " 1 0.0 0.0 \n", "\n", - " workclass_Self-emp-not-inc ... native-country_Portugal \\\n", - " race sex ... \n", - "30149 1 1 1 ... 0 \n", - "12028 1 0 1 ... 0 \n", - "36374 1 1 0 ... 0 \n", - "8055 1 1 0 ... 0 \n", - "38108 1 1 0 ... 0 \n", + " workclass_State-gov workclass_Without-pay education_10th \\\n", + "race sex \n", + "1 1 0.0 0.0 0.0 \n", + " 0 0.0 0.0 0.0 \n", + " 1 0.0 0.0 0.0 \n", + " 1 0.0 0.0 0.0 \n", + " 1 0.0 0.0 1.0 \n", "\n", - " native-country_Puerto-Rico native-country_Scotland \\\n", - " race sex \n", - "30149 1 1 0 0 \n", - "12028 1 0 0 0 \n", - "36374 1 1 0 0 \n", - "8055 1 1 1 0 \n", - "38108 1 1 0 0 \n", + " education_11th education_12th ... native-country_Thailand \\\n", + "race sex ... \n", + "1 1 0.0 0.0 ... 0.0 \n", + " 0 0.0 0.0 ... 0.0 \n", + " 1 0.0 0.0 ... 0.0 \n", + " 1 0.0 0.0 ... 0.0 \n", + " 1 0.0 0.0 ... 
0.0 \n", "\n", - " native-country_South native-country_Taiwan \\\n", - " race sex \n", - "30149 1 1 0 0 \n", - "12028 1 0 0 0 \n", - "36374 1 1 0 0 \n", - "8055 1 1 0 0 \n", - "38108 1 1 0 0 \n", + " native-country_Trinadad&Tobago native-country_United-States \\\n", + "race sex \n", + "1 1 0.0 1.0 \n", + " 0 0.0 0.0 \n", + " 1 0.0 1.0 \n", + " 1 0.0 0.0 \n", + " 1 0.0 1.0 \n", "\n", - " native-country_Thailand native-country_Trinadad&Tobago \\\n", - " race sex \n", - "30149 1 1 0 0 \n", - "12028 1 0 0 0 \n", - "36374 1 1 0 0 \n", - "8055 1 1 0 0 \n", - "38108 1 1 0 0 \n", + " native-country_Vietnam native-country_Yugoslavia age \\\n", + "race sex \n", + "1 1 0.0 0.0 58.0 \n", + " 0 0.0 0.0 51.0 \n", + " 1 0.0 0.0 26.0 \n", + " 1 0.0 0.0 44.0 \n", + " 1 0.0 0.0 33.0 \n", "\n", - " native-country_United-States native-country_Vietnam \\\n", - " race sex \n", - "30149 1 1 1 0 \n", - "12028 1 0 0 0 \n", - "36374 1 1 1 0 \n", - "8055 1 1 0 0 \n", - "38108 1 1 1 0 \n", - "\n", - " native-country_Yugoslavia \n", - " race sex \n", - "30149 1 1 0 \n", - "12028 1 0 0 \n", - "36374 1 1 0 \n", - "8055 1 1 0 \n", - "38108 1 1 0 \n", + " education-num capital-gain capital-loss hours-per-week \n", + "race sex \n", + "1 1 11.0 0.0 0.0 42.0 \n", + " 0 12.0 0.0 0.0 30.0 \n", + " 1 14.0 0.0 1887.0 40.0 \n", + " 1 3.0 0.0 0.0 40.0 \n", + " 1 6.0 0.0 0.0 40.0 \n", "\n", "[5 rows x 100 columns]" ] @@ -628,7 +583,12 @@ } ], "source": [ - "X_train, X_test = pd.get_dummies(X_train), pd.get_dummies(X_test)\n", + "ohe = make_column_transformer(\n", + " (OneHotEncoder(sparse=False), X_train.dtypes == 'category'),\n", + " remainder='passthrough', verbose_feature_names_out=False)\n", + "X_train = pd.DataFrame(ohe.fit_transform(X_train), columns=ohe.get_feature_names_out(), index=X_train.index)\n", + "X_test = pd.DataFrame(ohe.transform(X_test), columns=ohe.get_feature_names_out(), index=X_test.index)\n", + "\n", "X_train.head()" ] }, @@ -647,12 +607,12 @@ { "data": { "text/plain": [ - " race sex\n", - "30149 1 1 0\n", - "12028 1 0 1\n", - "36374 1 1 1\n", - "8055 1 1 0\n", - "38108 1 1 0\n", + "race sex\n", + "1 1 0\n", + " 0 1\n", + " 1 1\n", + " 1 0\n", + " 1 0\n", "dtype: int64" ] }, @@ -685,30 +645,20 @@ "metadata": {}, "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "0.8373995724920764\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "lbfgs failed to converge (status=1):\n", - "STOP: TOTAL NO. 
of ITERATIONS REACHED LIMIT.\n", - "\n", - "Increase the number of iterations (max_iter) or scale the data as shown in:\n", - " https://scikit-learn.org/stable/modules/preprocessing.html\n", - "Please also refer to the documentation for alternative solver options:\n", - " https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n" - ] + "data": { + "text/plain": [ + "0.8460234392275374" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" } ], "source": [ - "y_pred = LogisticRegression(solver='lbfgs').fit(X_train, y_train).predict(X_test)\n", + "y_pred = LogisticRegression(solver='liblinear').fit(X_train, y_train).predict(X_test)\n", "lr_acc = accuracy_score(y_test, y_pred)\n", - "print(lr_acc)" + "lr_acc" ] }, { @@ -728,16 +678,19 @@ "metadata": {}, "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "0.09897521109915139\n" - ] + "data": { + "text/plain": [ + "0.09335303807799161" + ] + }, + "execution_count": 10, + "metadata": {}, + "output_type": "execute_result" } ], "source": [ "lr_aoe_sex = average_odds_error(y_test, y_pred, prot_attr='sex')\n", - "print(lr_aoe_sex)" + "lr_aoe_sex" ] }, { @@ -746,16 +699,19 @@ "metadata": {}, "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "0.00867568807624941\n" - ] + "data": { + "text/plain": [ + "0.06751597777565721" + ] + }, + "execution_count": 11, + "metadata": {}, + "output_type": "execute_result" } ], "source": [ "lr_aoe_race = average_odds_error(y_test, y_pred, prot_attr='race')\n", - "print(lr_aoe_race)" + "lr_aoe_race" ] }, { @@ -778,7 +734,7 @@ "metadata": {}, "outputs": [], "source": [ - "estimator = LogisticRegression(solver='lbfgs')" + "estimator = LogisticRegression(solver='liblinear')" ] }, { @@ -813,180 +769,42 @@ "name": "stderr", "output_type": "stream", "text": [ - "lbfgs failed to converge (status=1):\n", - "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n", - "\n", - "Increase the number of iterations (max_iter) or scale the data as shown in:\n", - " https://scikit-learn.org/stable/modules/preprocessing.html\n", - "Please also refer to the documentation for alternative solver options:\n", - " https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n", - "lbfgs failed to converge (status=1):\n", - "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n", - "\n", - "Increase the number of iterations (max_iter) or scale the data as shown in:\n", - " https://scikit-learn.org/stable/modules/preprocessing.html\n", - "Please also refer to the documentation for alternative solver options:\n", - " https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n", - "lbfgs failed to converge (status=1):\n", - "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n", - "\n", - "Increase the number of iterations (max_iter) or scale the data as shown in:\n", - " https://scikit-learn.org/stable/modules/preprocessing.html\n", - "Please also refer to the documentation for alternative solver options:\n", - " https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n", - "lbfgs failed to converge (status=1):\n", - "STOP: TOTAL NO. 
of ITERATIONS REACHED LIMIT.\n", - "\n", - "Increase the number of iterations (max_iter) or scale the data as shown in:\n", - " https://scikit-learn.org/stable/modules/preprocessing.html\n", - "Please also refer to the documentation for alternative solver options:\n", - " https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n" + "Using the level keyword in DataFrame and Series aggregations is deprecated and will be removed in a future version. Use groupby instead. 
df.sum(level=1) should use df.groupby(level=1).sum().\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "0.8225842116901305\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "lbfgs failed to converge (status=1):\n", - "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n", - "\n", - "Increase the number of iterations (max_iter) or scale the data as shown in:\n", - " https://scikit-learn.org/stable/modules/preprocessing.html\n", - "Please also refer to the documentation for alternative solver options:\n", - " https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n" + "0.834303825458834\n" ] } ], @@ -1013,7 +831,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "0.018426256067917424\n" + "0.02361168550972803\n" ] } ], @@ -1034,7 +852,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "0.005848503310276698\n" + "0.024975550258025947\n" ] } ], @@ -1061,7 +879,7 @@ { "data": { "text/plain": [ - "23" + "29" ] }, "execution_count": 17, @@ -1070,7 +888,7 @@ } ], "source": [ - "exp_grad_red.model._n_oracle_calls" + "exp_grad_red.model_._n_oracle_calls" ] }, { @@ -1118,179 +936,41 @@ "name": "stderr", "output_type": "stream", "text": [ - "lbfgs failed to converge (status=1):\n", - "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n", - "\n", - "Increase the number of iterations (max_iter) or scale the data as shown in:\n", - " https://scikit-learn.org/stable/modules/preprocessing.html\n", - "Please also refer to the documentation for alternative solver options:\n", - " https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n", - "lbfgs failed to converge (status=1):\n", - "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n", - "\n", - "Increase the number of iterations (max_iter) or scale the data as shown in:\n", - " https://scikit-learn.org/stable/modules/preprocessing.html\n", - "Please also refer to the documentation for alternative solver options:\n", - " https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n", - "lbfgs failed to converge (status=1):\n", - "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n", - "\n", - "Increase the number of iterations (max_iter) or scale the data as shown in:\n", - " https://scikit-learn.org/stable/modules/preprocessing.html\n", - "Please also refer to the documentation for alternative solver options:\n", - " https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n", - "lbfgs failed to converge (status=1):\n", - "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n", - "\n", - "Increase the number of iterations (max_iter) or scale the data as shown in:\n", - " https://scikit-learn.org/stable/modules/preprocessing.html\n", - "Please also refer to the documentation for alternative solver options:\n", - " https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n", - "lbfgs failed to converge (status=1):\n", - "STOP: TOTAL NO. of ITERATIONS REACHED LIMIT.\n", - "\n", - "Increase the number of iterations (max_iter) or scale the data as shown in:\n", - " https://scikit-learn.org/stable/modules/preprocessing.html\n", - "Please also refer to the documentation for alternative solver options:\n", - " https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n", - "lbfgs failed to converge (status=1):\n", - "STOP: TOTAL NO. 
of ITERATIONS REACHED LIMIT.\n", - "\n", - "Increase the number of iterations (max_iter) or scale the data as shown in:\n", - " https://scikit-learn.org/stable/modules/preprocessing.html\n", - "Please also refer to the documentation for alternative solver options:\n", - " https://scikit-learn.org/stable/modules/linear_model.html#logistic-regression\n" + "Using the level keyword in DataFrame and Series aggregations is deprecated and will be removed in a future version. Use groupby instead. 
df.sum(level=1) should use df.groupby(level=1).sum().\n" ] }, { "data": { "text/plain": [ - "0.8225842116901305" + "0.834303825458834" ] }, "execution_count": 19, @@ -1318,7 +998,7 @@ { "data": { "text/plain": [ - "0.018426256067917424" + "0.02361168550972803" ] }, "execution_count": 20, @@ -1338,7 +1018,7 @@ { "data": { "text/plain": [ - "0.005848503310276698" + "0.024975550258025947" ] }, "execution_count": 21, @@ -1353,7 +1033,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3.9.7 ('aif360')", "language": "python", "name": "python3" }, @@ -1367,9 +1047,14 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.3" + "version": "3.9.7" + }, + "vscode": { + "interpreter": { + "hash": "d0c5ced7753e77a483fec8ff7063075635521cce6e0bd54998c8f174742209dd" + } } }, "nbformat": 4, "nbformat_minor": 2 -} \ No newline at end of file +} diff --git a/examples/sklearn/demo_grid_search_reduction_classification_sklearn.ipynb b/examples/sklearn/demo_grid_search_reduction_classification_sklearn.ipynb index 4ce0a2cd..3ed35dcf 100644 --- a/examples/sklearn/demo_grid_search_reduction_classification_sklearn.ipynb +++ b/examples/sklearn/demo_grid_search_reduction_classification_sklearn.ipynb @@ -16,21 +16,17 @@ "metadata": {}, "outputs": [], "source": [ - "%matplotlib inline\n", - "import matplotlib.pyplot as plt\n", "import numpy as np\n", "import pandas as pd\n", "\n", "from sklearn.linear_model import LogisticRegression\n", "from sklearn.metrics import accuracy_score\n", - "from sklearn.model_selection import GridSearchCV, train_test_split\n", - "from sklearn.preprocessing import OneHotEncoder\n", + "from sklearn.model_selection import train_test_split\n", "\n", "from aif360.sklearn.inprocessing import GridSearchReduction\n", "\n", "from aif360.sklearn.datasets import fetch_adult\n", - "from aif360.sklearn.metrics import disparate_impact_ratio, average_odds_error, generalized_fpr\n", - "from aif360.sklearn.metrics import generalized_fnr, difference" + "from aif360.sklearn.metrics import average_odds_error" ] }, { @@ -76,7 +72,6 @@ " \n", " \n", " \n", - " \n", " age\n", " workclass\n", " education\n", @@ -92,7 +87,6 @@ " native-country\n", " \n", " \n", - " \n", " race\n", " sex\n", " \n", @@ -112,7 +106,6 @@ " \n", " \n", " \n", - " 0\n", " Non-white\n", " Male\n", " 25.0\n", @@ -122,7 +115,7 @@ " Never-married\n", " Machine-op-inspct\n", " Own-child\n", - " Non-white\n", + " Black\n", " Male\n", " 0.0\n", " 0.0\n", @@ -130,8 +123,7 @@ " United-States\n", " \n", " \n", - " 1\n", - " White\n", + " White\n", " Male\n", " 38.0\n", " Private\n", @@ -148,8 +140,6 @@ " United-States\n", " \n", " \n", - " 2\n", - " White\n", " Male\n", " 28.0\n", " Local-gov\n", @@ -166,7 +156,6 @@ " United-States\n", " \n", " \n", - " 3\n", " Non-white\n", " Male\n", " 44.0\n", @@ -176,7 +165,7 @@ " Married-civ-spouse\n", " Machine-op-inspct\n", " Husband\n", - " Non-white\n", + " Black\n", " Male\n", " 7688.0\n", " 0.0\n", @@ -184,7 +173,6 @@ " United-States\n", " \n", " \n", - " 5\n", " White\n", " Male\n", " 34.0\n", @@ -206,37 +194,37 @@ "" ], "text/plain": [ - " age workclass education education-num \\\n", - " race sex \n", - "0 Non-white Male 25.0 Private 11th 7.0 \n", - "1 White Male 38.0 Private HS-grad 9.0 \n", - "2 White Male 28.0 Local-gov Assoc-acdm 12.0 \n", - "3 Non-white Male 44.0 Private Some-college 10.0 \n", - "5 White Male 34.0 Private 10th 6.0 \n", + " age workclass education education-num \\\n", + "race sex \n", + 
"Non-white Male 25.0 Private 11th 7.0 \n", + "White Male 38.0 Private HS-grad 9.0 \n", + " Male 28.0 Local-gov Assoc-acdm 12.0 \n", + "Non-white Male 44.0 Private Some-college 10.0 \n", + "White Male 34.0 Private 10th 6.0 \n", "\n", - " marital-status occupation relationship \\\n", - " race sex \n", - "0 Non-white Male Never-married Machine-op-inspct Own-child \n", - "1 White Male Married-civ-spouse Farming-fishing Husband \n", - "2 White Male Married-civ-spouse Protective-serv Husband \n", - "3 Non-white Male Married-civ-spouse Machine-op-inspct Husband \n", - "5 White Male Never-married Other-service Not-in-family \n", + " marital-status occupation relationship race \\\n", + "race sex \n", + "Non-white Male Never-married Machine-op-inspct Own-child Black \n", + "White Male Married-civ-spouse Farming-fishing Husband White \n", + " Male Married-civ-spouse Protective-serv Husband White \n", + "Non-white Male Married-civ-spouse Machine-op-inspct Husband Black \n", + "White Male Never-married Other-service Not-in-family White \n", "\n", - " race sex capital-gain capital-loss hours-per-week \\\n", - " race sex \n", - "0 Non-white Male Non-white Male 0.0 0.0 40.0 \n", - "1 White Male White Male 0.0 0.0 50.0 \n", - "2 White Male White Male 0.0 0.0 40.0 \n", - "3 Non-white Male Non-white Male 7688.0 0.0 40.0 \n", - "5 White Male White Male 0.0 0.0 30.0 \n", + " sex capital-gain capital-loss hours-per-week \\\n", + "race sex \n", + "Non-white Male Male 0.0 0.0 40.0 \n", + "White Male Male 0.0 0.0 50.0 \n", + " Male Male 0.0 0.0 40.0 \n", + "Non-white Male Male 7688.0 0.0 40.0 \n", + "White Male Male 0.0 0.0 30.0 \n", "\n", - " native-country \n", - " race sex \n", - "0 Non-white Male United-States \n", - "1 White Male United-States \n", - "2 White Male United-States \n", - "3 Non-white Male United-States \n", - "5 White Male United-States " + " native-country \n", + "race sex \n", + "Non-white Male United-States \n", + "White Male United-States \n", + " Male United-States \n", + "Non-white Male United-States \n", + "White Male United-States " ] }, "execution_count": 2, @@ -249,16 +237,6 @@ "X.head()" ] }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "# there is one unused category ('Never-worked') that was dropped during dropna\n", - "X.workclass.cat.remove_unused_categories(inplace=True)" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -268,7 +246,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ @@ -285,7 +263,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 4, "metadata": {}, "outputs": [], "source": [ @@ -301,7 +279,7 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 5, "metadata": {}, "outputs": [], "source": [ @@ -318,7 +296,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 6, "metadata": {}, "outputs": [ { @@ -343,31 +321,29 @@ " \n", " \n", " \n", - " \n", " age\n", " education-num\n", " capital-gain\n", " capital-loss\n", " hours-per-week\n", - " workclass_Federal-gov\n", - " workclass_Local-gov\n", " workclass_Private\n", - " workclass_Self-emp-inc\n", " workclass_Self-emp-not-inc\n", + " workclass_Self-emp-inc\n", + " workclass_Federal-gov\n", + " workclass_Local-gov\n", " ...\n", - " native-country_Portugal\n", - " native-country_Puerto-Rico\n", + " native-country_Guatemala\n", + " native-country_Nicaragua\n", " native-country_Scotland\n", - " native-country_South\n", - " 
native-country_Taiwan\n", " native-country_Thailand\n", - " native-country_Trinadad&Tobago\n", - " native-country_United-States\n", - " native-country_Vietnam\n", " native-country_Yugoslavia\n", + " native-country_El-Salvador\n", + " native-country_Trinadad&Tobago\n", + " native-country_Peru\n", + " native-country_Hong\n", + " native-country_Holand-Netherlands\n", " \n", " \n", - " \n", " race\n", " sex\n", " \n", @@ -395,8 +371,7 @@ " \n", " \n", " \n", - " 30149\n", - " 1\n", + " 1\n", " 1\n", " 58.0\n", " 11.0\n", @@ -404,10 +379,10 @@ " 0.0\n", " 42.0\n", " 0\n", + " 1\n", " 0\n", " 0\n", " 0\n", - " 1\n", " ...\n", " 0\n", " 0\n", @@ -416,13 +391,11 @@ " 0\n", " 0\n", " 0\n", - " 1\n", + " 0\n", " 0\n", " 0\n", " \n", " \n", - " 12028\n", - " 1\n", " 0\n", " 51.0\n", " 12.0\n", @@ -430,10 +403,10 @@ " 0.0\n", " 30.0\n", " 0\n", + " 1\n", " 0\n", " 0\n", " 0\n", - " 1\n", " ...\n", " 0\n", " 0\n", @@ -447,17 +420,15 @@ " 0\n", " \n", " \n", - " 36374\n", - " 1\n", " 1\n", " 26.0\n", " 14.0\n", " 0.0\n", " 1887.0\n", " 40.0\n", + " 1\n", " 0\n", " 0\n", - " 1\n", " 0\n", " 0\n", " ...\n", @@ -468,27 +439,25 @@ " 0\n", " 0\n", " 0\n", - " 1\n", + " 0\n", " 0\n", " 0\n", " \n", " \n", - " 8055\n", - " 1\n", " 1\n", " 44.0\n", " 3.0\n", " 0.0\n", " 0.0\n", " 40.0\n", + " 1\n", " 0\n", " 0\n", - " 1\n", " 0\n", " 0\n", " ...\n", " 0\n", - " 1\n", + " 0\n", " 0\n", " 0\n", " 0\n", @@ -499,17 +468,15 @@ " 0\n", " \n", " \n", - " 38108\n", - " 1\n", " 1\n", " 33.0\n", " 6.0\n", " 0.0\n", " 0.0\n", " 40.0\n", + " 1\n", " 0\n", " 0\n", - " 1\n", " 0\n", " 0\n", " ...\n", @@ -520,98 +487,92 @@ " 0\n", " 0\n", " 0\n", - " 1\n", + " 0\n", " 0\n", " 0\n", " \n", " \n", "\n", - "

5 rows × 100 columns\n", + "5 rows × 102 columns

\n", "" ], "text/plain": [ - " age education-num capital-gain capital-loss \\\n", - " race sex \n", - "30149 1 1 58.0 11.0 0.0 0.0 \n", - "12028 1 0 51.0 12.0 0.0 0.0 \n", - "36374 1 1 26.0 14.0 0.0 1887.0 \n", - "8055 1 1 44.0 3.0 0.0 0.0 \n", - "38108 1 1 33.0 6.0 0.0 0.0 \n", + " age education-num capital-gain capital-loss hours-per-week \\\n", + "race sex \n", + "1 1 58.0 11.0 0.0 0.0 42.0 \n", + " 0 51.0 12.0 0.0 0.0 30.0 \n", + " 1 26.0 14.0 0.0 1887.0 40.0 \n", + " 1 44.0 3.0 0.0 0.0 40.0 \n", + " 1 33.0 6.0 0.0 0.0 40.0 \n", "\n", - " hours-per-week workclass_Federal-gov workclass_Local-gov \\\n", - " race sex \n", - "30149 1 1 42.0 0 0 \n", - "12028 1 0 30.0 0 0 \n", - "36374 1 1 40.0 0 0 \n", - "8055 1 1 40.0 0 0 \n", - "38108 1 1 40.0 0 0 \n", + " workclass_Private workclass_Self-emp-not-inc \\\n", + "race sex \n", + "1 1 0 1 \n", + " 0 0 1 \n", + " 1 1 0 \n", + " 1 1 0 \n", + " 1 1 0 \n", "\n", - " workclass_Private workclass_Self-emp-inc \\\n", - " race sex \n", - "30149 1 1 0 0 \n", - "12028 1 0 0 0 \n", - "36374 1 1 1 0 \n", - "8055 1 1 1 0 \n", - "38108 1 1 1 0 \n", + " workclass_Self-emp-inc workclass_Federal-gov workclass_Local-gov \\\n", + "race sex \n", + "1 1 0 0 0 \n", + " 0 0 0 0 \n", + " 1 0 0 0 \n", + " 1 0 0 0 \n", + " 1 0 0 0 \n", "\n", - " workclass_Self-emp-not-inc ... native-country_Portugal \\\n", - " race sex ... \n", - "30149 1 1 1 ... 0 \n", - "12028 1 0 1 ... 0 \n", - "36374 1 1 0 ... 0 \n", - "8055 1 1 0 ... 0 \n", - "38108 1 1 0 ... 0 \n", + " ... native-country_Guatemala native-country_Nicaragua \\\n", + "race sex ... \n", + "1 1 ... 0 0 \n", + " 0 ... 0 0 \n", + " 1 ... 0 0 \n", + " 1 ... 0 0 \n", + " 1 ... 0 0 \n", "\n", - " native-country_Puerto-Rico native-country_Scotland \\\n", - " race sex \n", - "30149 1 1 0 0 \n", - "12028 1 0 0 0 \n", - "36374 1 1 0 0 \n", - "8055 1 1 1 0 \n", - "38108 1 1 0 0 \n", + " native-country_Scotland native-country_Thailand \\\n", + "race sex \n", + "1 1 0 0 \n", + " 0 0 0 \n", + " 1 0 0 \n", + " 1 0 0 \n", + " 1 0 0 \n", "\n", - " native-country_South native-country_Taiwan \\\n", - " race sex \n", - "30149 1 1 0 0 \n", - "12028 1 0 0 0 \n", - "36374 1 1 0 0 \n", - "8055 1 1 0 0 \n", - "38108 1 1 0 0 \n", + " native-country_Yugoslavia native-country_El-Salvador \\\n", + "race sex \n", + "1 1 0 0 \n", + " 0 0 0 \n", + " 1 0 0 \n", + " 1 0 0 \n", + " 1 0 0 \n", "\n", - " native-country_Thailand native-country_Trinadad&Tobago \\\n", - " race sex \n", - "30149 1 1 0 0 \n", - "12028 1 0 0 0 \n", - "36374 1 1 0 0 \n", - "8055 1 1 0 0 \n", - "38108 1 1 0 0 \n", + " native-country_Trinadad&Tobago native-country_Peru \\\n", + "race sex \n", + "1 1 0 0 \n", + " 0 0 0 \n", + " 1 0 0 \n", + " 1 0 0 \n", + " 1 0 0 \n", "\n", - " native-country_United-States native-country_Vietnam \\\n", - " race sex \n", - "30149 1 1 1 0 \n", - "12028 1 0 0 0 \n", - "36374 1 1 1 0 \n", - "8055 1 1 0 0 \n", - "38108 1 1 1 0 \n", + " native-country_Hong native-country_Holand-Netherlands \n", + "race sex \n", + "1 1 0 0 \n", + " 0 0 0 \n", + " 1 0 0 \n", + " 1 0 0 \n", + " 1 0 0 \n", "\n", - " native-country_Yugoslavia \n", - " race sex \n", - "30149 1 1 0 \n", - "12028 1 0 0 \n", - "36374 1 1 0 \n", - "8055 1 1 0 \n", - "38108 1 1 0 \n", - "\n", - "[5 rows x 100 columns]" + "[5 rows x 102 columns]" ] }, - "execution_count": 7, + "execution_count": 6, "metadata": {}, "output_type": "execute_result" } ], "source": [ "X_train, X_test = pd.get_dummies(X_train), pd.get_dummies(X_test)\n", + "X_train = X_train.drop(columns=['sex_Female'])\n", + "X_test = 
X_test.drop(columns=['sex_Female'])\n", "X_train.head()" ] }, @@ -624,22 +585,22 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 7, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - " race sex\n", - "30149 1 1 0\n", - "12028 1 0 1\n", - "36374 1 1 1\n", - "8055 1 1 0\n", - "38108 1 1 0\n", + "race sex\n", + "1 1 0\n", + " 0 1\n", + " 1 1\n", + " 1 0\n", + " 1 0\n", "dtype: int64" ] }, - "execution_count": 8, + "execution_count": 7, "metadata": {}, "output_type": "execute_result" } @@ -664,19 +625,19 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 8, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "0.8373258642293802\n" + "0.8453600648632712\n" ] } ], "source": [ - "y_pred = LogisticRegression(solver='lbfgs').fit(X_train, y_train).predict(X_test)\n", + "y_pred = LogisticRegression(solver='liblinear', random_state=1234).fit(X_train, y_train).predict(X_test)\n", "lr_acc = accuracy_score(y_test, y_pred)\n", "print(lr_acc)" ] @@ -694,14 +655,14 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 9, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "0.10043769764182503\n" + "0.09356509680536546\n" ] } ], @@ -726,27 +687,27 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 10, "metadata": {}, "outputs": [], "source": [ - "estimator = LogisticRegression(solver='lbfgs')" + "estimator = LogisticRegression(solver='liblinear', random_state=1234)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Determine the columns associated with the protected attribute(s). Grid search can handle more then one attribute but it is computationally expensive. A similar method with less computational overhead is exponentiated gradient reduction, detailed at [examples/sklearn/demo_exponentiated_gradient_reduction_sklearn.ipynb](sklearn/demo_exponentiated_gradient_reduction_sklearn.ipynb)." + "Determine the columns associated with the protected attribute(s). Grid search can handle more than one attribute but it is computationally expensive. A similar method with less computational overhead is exponentiated gradient reduction, detailed at [examples/sklearn/demo_exponentiated_gradient_reduction_sklearn.ipynb](sklearn/demo_exponentiated_gradient_reduction_sklearn.ipynb)." ] }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 11, "metadata": {}, "outputs": [], "source": [ - "prot_attr_cols = [colname for colname in X_train if \"sex\" in colname]" + "prot_attr = 'sex_Male'" ] }, { @@ -758,20 +719,46 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 12, "metadata": {}, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Using the level keyword in DataFrame and Series aggregations is deprecated and will be removed in a future version. Use groupby instead. df.sum(level=1) should use df.groupby(level=1).sum().\n", + "Using the level keyword in DataFrame and Series aggregations is deprecated and will be removed in a future version. Use groupby instead. df.sum(level=1) should use df.groupby(level=1).sum().\n", + "Using the level keyword in DataFrame and Series aggregations is deprecated and will be removed in a future version. Use groupby instead. df.sum(level=1) should use df.groupby(level=1).sum().\n", + "Using the level keyword in DataFrame and Series aggregations is deprecated and will be removed in a future version. Use groupby instead. 
df.sum(level=1) should use df.groupby(level=1).sum().\n" + ] + }, { "name": "stdout", "output_type": "stream", "text": [ - "0.8318714527898577\n" + "0.8455074813886637\n" ] } ], "source": [ "np.random.seed(0) #need for reproducibility\n", - "grid_search_red = GridSearchReduction(prot_attr=prot_attr_cols, \n", + "grid_search_red = GridSearchReduction(prot_attr=prot_attr, \n", " estimator=estimator, \n", " constraints=\"EqualizedOdds\",\n", " grid_size=20,\n", @@ -786,14 +773,14 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 13, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "0.0551512399603683\n" + "0.06715455716850638\n" ] } ], @@ -814,27 +801,54 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 14, "metadata": { "scrolled": true }, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Using the level keyword in DataFrame and Series aggregations is deprecated and will be removed in a future version. Use groupby instead. df.sum(level=1) should use df.groupby(level=1).sum().\n", + "Using the level keyword in DataFrame and Series aggregations is deprecated and will be removed in a future version. Use groupby instead. df.sum(level=1) should use df.groupby(level=1).sum().\n", + "Using the level keyword in DataFrame and Series aggregations is deprecated and will be removed in a future version. Use groupby instead. df.sum(level=1) should use df.groupby(level=1).sum().\n", + "Using the level keyword in DataFrame and Series aggregations is deprecated and will be removed in a future version. Use groupby instead. df.sum(level=1) should use df.groupby(level=1).sum().\n", + "Using the level keyword in DataFrame and Series aggregations is deprecated and will be removed in a future version. Use groupby instead. df.sum(level=1) should use df.groupby(level=1).sum().\n", + "Using the level keyword in DataFrame and Series aggregations is deprecated and will be removed in a future version. Use groupby instead. df.sum(level=1) should use df.groupby(level=1).sum().\n", + "Using the level keyword in DataFrame and Series aggregations is deprecated and will be removed in a future version. Use groupby instead. df.sum(level=1) should use df.groupby(level=1).sum().\n", + "Using the level keyword in DataFrame and Series aggregations is deprecated and will be removed in a future version. Use groupby instead. df.sum(level=1) should use df.groupby(level=1).sum().\n", + "Using the level keyword in DataFrame and Series aggregations is deprecated and will be removed in a future version. Use groupby instead. df.sum(level=1) should use df.groupby(level=1).sum().\n", + "Using the level keyword in DataFrame and Series aggregations is deprecated and will be removed in a future version. Use groupby instead. df.sum(level=1) should use df.groupby(level=1).sum().\n", + "Using the level keyword in DataFrame and Series aggregations is deprecated and will be removed in a future version. Use groupby instead. df.sum(level=1) should use df.groupby(level=1).sum().\n", + "Using the level keyword in DataFrame and Series aggregations is deprecated and will be removed in a future version. Use groupby instead. df.sum(level=1) should use df.groupby(level=1).sum().\n", + "Using the level keyword in DataFrame and Series aggregations is deprecated and will be removed in a future version. Use groupby instead. 
df.sum(level=1) should use df.groupby(level=1).sum().\n", + "Using the level keyword in DataFrame and Series aggregations is deprecated and will be removed in a future version. Use groupby instead. df.sum(level=1) should use df.groupby(level=1).sum().\n", + "Using the level keyword in DataFrame and Series aggregations is deprecated and will be removed in a future version. Use groupby instead. df.sum(level=1) should use df.groupby(level=1).sum().\n", + "Using the level keyword in DataFrame and Series aggregations is deprecated and will be removed in a future version. Use groupby instead. df.sum(level=1) should use df.groupby(level=1).sum().\n", + "Using the level keyword in DataFrame and Series aggregations is deprecated and will be removed in a future version. Use groupby instead. df.sum(level=1) should use df.groupby(level=1).sum().\n", + "Using the level keyword in DataFrame and Series aggregations is deprecated and will be removed in a future version. Use groupby instead. df.sum(level=1) should use df.groupby(level=1).sum().\n", + "Using the level keyword in DataFrame and Series aggregations is deprecated and will be removed in a future version. Use groupby instead. df.sum(level=1) should use df.groupby(level=1).sum().\n", + "Using the level keyword in DataFrame and Series aggregations is deprecated and will be removed in a future version. Use groupby instead. df.sum(level=1) should use df.groupby(level=1).sum().\n" + ] + }, { "data": { "text/plain": [ - "0.8318714527898577" + "0.8455074813886637" ] }, - "execution_count": 15, + "execution_count": 14, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "import fairlearn.reductions as red \n", + "import fairlearn.reductions as red\n", + "\n", "\n", "np.random.seed(0) #need for reproducibility\n", - "grid_search_red = GridSearchReduction(prot_attr=prot_attr_cols, \n", + "grid_search_red = GridSearchReduction(prot_attr=prot_attr, \n", " estimator=estimator, \n", " constraints=red.EqualizedOdds(),\n", " grid_size=20,\n", @@ -845,16 +859,16 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 15, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - "0.0551512399603683" + "0.06715455716850638" ] }, - "execution_count": 16, + "execution_count": 15, "metadata": {}, "output_type": "execute_result" } @@ -866,7 +880,7 @@ ], "metadata": { "kernelspec": { - "display_name": "Python 3", + "display_name": "Python 3.9.7 ('aif360')", "language": "python", "name": "python3" }, @@ -880,9 +894,14 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.3" + "version": "3.9.7" + }, + "vscode": { + "interpreter": { + "hash": "d0c5ced7753e77a483fec8ff7063075635521cce6e0bd54998c8f174742209dd" + } } }, "nbformat": 4, "nbformat_minor": 2 -} \ No newline at end of file +} diff --git a/examples/sklearn/demo_grid_search_reduction_regression_sklearn.ipynb b/examples/sklearn/demo_grid_search_reduction_regression_sklearn.ipynb index e90dc3ab..76a1a8b7 100644 --- a/examples/sklearn/demo_grid_search_reduction_regression_sklearn.ipynb +++ b/examples/sklearn/demo_grid_search_reduction_regression_sklearn.ipynb @@ -16,18 +16,17 @@ "metadata": {}, "outputs": [], "source": [ - "%matplotlib inline\n", - "import matplotlib.pyplot as plt\n", "import numpy as np\n", "import pandas as pd\n", "\n", + "from sklearn.compose import TransformedTargetRegressor\n", "from sklearn.linear_model import LinearRegression\n", "from sklearn.metrics import mean_absolute_error\n", - "from sklearn import 
preprocessing\n", + "from sklearn.preprocessing import MinMaxScaler\n", "\n", + "from aif360.sklearn.datasets import fetch_lawschool_gpa\n", "from aif360.sklearn.inprocessing import GridSearchReduction\n", - "\n", - "from aif360.sklearn.datasets import fetch_lawschool_gpa" + "from aif360.sklearn.metrics import difference" ] }, { @@ -72,13 +71,11 @@ " \n", " \n", " \n", - " \n", " lsat\n", " ugpa\n", " race\n", " \n", " \n", - " \n", " race\n", " \n", " \n", @@ -88,170 +85,32 @@ " \n", " \n", " 0\n", - " black\n", " 38.0\n", " 3.3\n", - " black\n", - " \n", - " \n", - " 1\n", - " white\n", - " 34.0\n", - " 4.0\n", - " white\n", - " \n", - " \n", - " 2\n", - " white\n", - " 34.0\n", - " 3.9\n", - " white\n", - " \n", - " \n", - " 3\n", - " white\n", - " 45.0\n", - " 3.3\n", - " white\n", - " \n", - " \n", - " 4\n", - " white\n", - " 39.0\n", - " 2.5\n", - " white\n", - " \n", - " \n", - "\n", - "" - ], - "text/plain": [ - " lsat ugpa race\n", - " race \n", - "0 black 38.0 3.3 black\n", - "1 white 34.0 4.0 white\n", - "2 white 34.0 3.9 white\n", - "3 white 45.0 3.3 white\n", - "4 white 39.0 2.5 white" - ] - }, - "execution_count": 2, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "X_train, y_train = fetch_lawschool_gpa(subset=\"train\")\n", - "X_test, y_test = fetch_lawschool_gpa(subset=\"test\")\n", - "X_train.head()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We can then map the protected attributes to integers," - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [], - "source": [ - "X_train.index = pd.MultiIndex.from_arrays(X_train.index.codes, names=X_train.index.names)\n", - "X_test.index = pd.MultiIndex.from_arrays(X_test.index.codes, names=X_test.index.names)\n", - "y_train.index = pd.MultiIndex.from_arrays(y_train.index.codes, names=y_train.index.names)\n", - "y_test.index = pd.MultiIndex.from_arrays(y_test.index.codes, names=y_test.index.names)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "We use Pandas for one-hot encoding for easy reference to columns associated with protected attributes, information necessary for grid search reduction." - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "metadata": {}, - "outputs": [ - { - "data": { - "text/html": [ - "
\n", - "\n", - "\n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", - " \n", " \n", " \n", " \n", " \n", - " \n", " \n", " \n", - " \n", " \n", " \n", " \n", - " \n", " \n", " \n", " \n", - " \n", " \n", " \n", " \n", - " \n", " \n", " \n", " \n", - " \n", " \n", " \n", " \n", - " \n", " \n", " \n", " \n", - " \n", " \n", " \n", " \n", @@ -259,22 +118,23 @@ "" ], "text/plain": [ - " lsat ugpa race_black race_white\n", - " race \n", - "0 0 38.0 3.3 1 0\n", - "1 1 34.0 4.0 0 1\n", - "2 1 34.0 3.9 0 1\n", - "3 1 45.0 3.3 0 1\n", - "4 1 39.0 2.5 0 1" + " lsat ugpa race\n", + "race \n", + "0 38.0 3.3 0\n", + "1 34.0 4.0 1\n", + "1 34.0 3.9 1\n", + "1 45.0 3.3 1\n", + "1 39.0 2.5 1" ] }, - "execution_count": 4, + "execution_count": 2, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "X_train, X_test = pd.get_dummies(X_train), pd.get_dummies(X_test)\n", + "X_train, y_train = fetch_lawschool_gpa(\"train\", numeric_only=True)\n", + "X_test, y_test = fetch_lawschool_gpa(\"test\", numeric_only=True)\n", "X_train.head()" ] }, @@ -282,12 +142,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We normalize the continuous values" + "We normalize the continuous values, making sure to propagate column names associated with protected attributes, information necessary for grid search reduction." ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 3, "metadata": {}, "outputs": [ { @@ -311,60 +171,46 @@ " \n", " \n", " \n", - " \n", " \n", " \n", - " \n", - " \n", + " \n", " \n", " \n", - " \n", " \n", " \n", " \n", " \n", - " \n", " \n", " \n", " \n", " \n", " \n", - " \n", " \n", " \n", - " \n", " \n", " \n", " \n", " \n", - " \n", " \n", " \n", - " \n", " \n", " \n", " \n", - " \n", " \n", " \n", " \n", - " \n", " \n", " \n", " \n", - " \n", " \n", " \n", " \n", - " \n", " \n", " \n", " \n", - " \n", " \n", " \n", " \n", - " \n", " \n", " \n", " \n", @@ -372,182 +218,93 @@ "" ], "text/plain": [ - " lsat ugpa race_black race_white\n", - " race \n", - "0 0 0.729730 0.825 1.0 0.0\n", - "1 1 0.621622 1.000 0.0 1.0\n", - "2 1 0.621622 0.975 0.0 1.0\n", - "3 1 0.918919 0.825 0.0 1.0\n", - "4 1 0.756757 0.625 0.0 1.0" + " lsat ugpa race\n", + "race \n", + "0 0.729730 0.825 0.0\n", + "1 0.621622 1.000 1.0\n", + "1 0.621622 0.975 1.0\n", + "1 0.918919 0.825 1.0\n", + "1 0.756757 0.625 1.0" ] }, - "execution_count": 5, + "execution_count": 3, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "min_max_scaler = preprocessing.MinMaxScaler()\n", - "X_train = pd.DataFrame(min_max_scaler.fit_transform(X_train.values),columns=list(X_train),index=X_train.index)\n", - "X_test = pd.DataFrame(min_max_scaler.transform(X_test.values),columns=list(X_test),index=X_test.index)\n", + "scaler = MinMaxScaler()\n", + "\n", + "X_train = pd.DataFrame(scaler.fit_transform(X_train), columns=X_train.columns, index=X_train.index)\n", + "X_test = pd.DataFrame(scaler.transform(X_test), columns=X_test.columns, index=X_test.index)\n", + "\n", "X_train.head()" ] }, { - "cell_type": "code", - "execution_count": 6, + "cell_type": "markdown", "metadata": {}, - "outputs": [], "source": [ - "min_max_scaler = preprocessing.MinMaxScaler()\n", - "y_train = pd.Series(min_max_scaler.fit_transform(y_train.values.reshape(-1, 1)).flatten(),index=y_train.index)\n", - "y_test = 
pd.Series(min_max_scaler.transform(y_test.values.reshape(-1, 1)).flatten(),index=y_test.index)" + "### Running metrics" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The protected attribute information is also replicated in the labels:" + "With the data in this format, we can easily train a scikit-learn model and get predictions for the test data. We drop the protective attribule columns so that they are not used in the model." ] }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 4, "metadata": {}, "outputs": [ { "data": { "text/plain": [ - " race\n", - "0 0 0.488636\n", - "1 1 0.688131\n", - "2 1 0.398990\n", - "3 1 0.758838\n", - "4 1 0.482323\n", - "dtype: float64" + "0.7400826321650612" ] }, - "execution_count": 7, + "execution_count": 4, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "y_train.head()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### Running metrics" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "With the data in this format, we can easily train a scikit-learn model and get predictions for the test data. We drop the protective attribule columns so that they are not used in the model." - ] - }, - { - "cell_type": "code", - "execution_count": 8, - "metadata": {}, - "outputs": [], - "source": [ - "prot_attr_cols = [col for col in list(X_train) if \"race\" in col]" - ] - }, - { - "cell_type": "code", - "execution_count": 9, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "0.09344477678851784\n" - ] - } - ], - "source": [ - "lr = LinearRegression().fit(X_train.drop(prot_attr_cols,axis=1), y_train)\n", - "y_pred = lr.predict(X_test.drop(prot_attr_cols, axis=1))\n", + "tt = TransformedTargetRegressor(LinearRegression(), transformer=scaler)\n", + "tt = tt.fit(X_train.drop([\"race\"], axis=1), y_train)\n", + "y_pred = tt.predict(X_test.drop([\"race\"], axis=1))\n", "lr_mae = mean_absolute_error(y_test, y_pred)\n", - "print(lr_mae)" + "lr_mae" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "We can assess how the mean absolute error differs across groups" + "We can assess how the mean absolute error differs across groups simply" ] }, { "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "White: 0.09151357295567962\n" - ] - } - ], - "source": [ - "X_test_white = X_test.iloc[X_test.index.get_level_values('race') == 1]\n", - "y_test_white = y_test.iloc[y_test.index.get_level_values('race') == 1]\n", - "\n", - "y_pred_white = lr.predict(X_test_white.drop(prot_attr_cols, axis=1))\n", - "\n", - "lr_mae_w = mean_absolute_error(y_test_white, y_pred_white)\n", - "print(\"White:\", lr_mae_w)" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Black: 0.11726179331646831\n" - ] - } - ], - "source": [ - "X_test_black = X_test.iloc[X_test.index.get_level_values('race') == 0]\n", - "y_test_black = y_test.iloc[y_test.index.get_level_values('race') == 0]\n", - "\n", - "y_pred_black = lr.predict(X_test_black.drop(prot_attr_cols, axis=1))\n", - "\n", - "lr_mae_b = mean_absolute_error(y_test_black, y_pred_black)\n", - "print(\"Black:\", lr_mae_b)" - ] - }, - { - "cell_type": "code", - "execution_count": 12, + "execution_count": 5, "metadata": {}, "outputs": [ { - "name": "stdout", - "output_type": "stream", - "text": [ - "Mean 
absolute error difference across groups: 0.025748220360788693\n" - ] + "data": { + "text/plain": [ + "0.20392590525744636" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" } ], "source": [ - "print(\"Mean absolute error difference across groups:\", lr_mae_b-lr_mae_w)" + "lr_mae_diff = difference(mean_absolute_error, y_test, y_pred)\n", + "lr_mae_diff" ] }, { @@ -561,16 +318,16 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Choose a base model for the candidate regressors. Base models should implement a fit method that can take a sample weight as input. For details refer to the docs. " + "Reuse the base model for the candidate regressors. Base models should implement a fit method that can take a sample weight as input. For details refer to the docs. " ] }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 6, "metadata": {}, "outputs": [], "source": [ - "estimator = LinearRegression()" + "estimator = TransformedTargetRegressor(LinearRegression(), transformer=scaler)" ] }, { @@ -582,25 +339,25 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 7, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "0.09624645677710374\n" + "0.7622719376746614\n" ] } ], "source": [ "np.random.seed(0) #need for reproducibility\n", - "grid_search_red = GridSearchReduction(prot_attr=prot_attr_cols, \n", + "grid_search_red = GridSearchReduction(prot_attr=\"race\", \n", " estimator=estimator, \n", " constraints=\"GroupLoss\",\n", " loss=\"Absolute\",\n", - " min_val=0,\n", - " max_val=1,\n", + " min_val=y_train.min(),\n", + " max_val=y_train.max(),\n", " grid_size=10,\n", " drop_prot_attr=True)\n", "grid_search_red.fit(X_train, y_train)\n", @@ -609,63 +366,28 @@ "print(gs_mae)\n", "\n", "#Check if mean absolute error is comparable\n", - "assert abs(gs_mae-lr_mae)<0.01" - ] - }, - { - "cell_type": "code", - "execution_count": 15, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "White: 0.09566668133321606\n" - ] - } - ], - "source": [ - "gs_mae_w = mean_absolute_error(y_test_white, grid_search_red.predict(X_test_white))\n", - "print(\"White:\", gs_mae_w)" + "assert abs(gs_mae-lr_mae) < 0.08" ] }, { "cell_type": "code", - "execution_count": 16, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Black: 0.1033966711122104\n" - ] - } - ], - "source": [ - "gs_mae_b = mean_absolute_error(y_test_black, grid_search_red.predict(X_test_black))\n", - "print(\"Black:\", gs_mae_b)" - ] - }, - { - "cell_type": "code", - "execution_count": 17, + "execution_count": 8, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Mean absolute error difference across groups: 0.007729989778994348\n" + "0.06122151904963535\n" ] } ], "source": [ - "print(\"Mean absolute error difference across groups:\", gs_mae_b-gs_mae_w)\n", + "gs_mae_diff = difference(mean_absolute_error, y_test, gs_pred)\n", + "print(gs_mae_diff)\n", "\n", "#Check if difference decreased\n", - "assert (gs_mae_b-gs_mae_w)<(lr_mae_b-lr_mae_w)" + "assert gs_mae_diff < lr_mae_diff" ] } ], @@ -685,9 +407,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.8.3" + "version": "3.7.11" } }, "nbformat": 4, "nbformat_minor": 2 -} \ No newline at end of file +} diff --git a/examples/sklearn/demo_new_features.ipynb b/examples/sklearn/demo_new_features.ipynb index a9b8433c..d0a85f2f 
100644 --- a/examples/sklearn/demo_new_features.ipynb +++ b/examples/sklearn/demo_new_features.ipynb @@ -58,8 +58,180 @@ "outputs": [ { "data": { - "text/html": "
[text/html output removed by this diff -- rendered DataFrame head tables (columns include lsat, ugpa, race_black, race_white, and the adult census features indexed by race/sex); HTML markup lost in extraction, see the text/plain repr that follows]
", - "text/plain": " age workclass education education-num \\\n race sex \n0 Non-white Male 25.0 Private 11th 7.0 \n1 White Male 38.0 Private HS-grad 9.0 \n2 White Male 28.0 Local-gov Assoc-acdm 12.0 \n3 Non-white Male 44.0 Private Some-college 10.0 \n5 White Male 34.0 Private 10th 6.0 \n\n marital-status occupation relationship \\\n race sex \n0 Non-white Male Never-married Machine-op-inspct Own-child \n1 White Male Married-civ-spouse Farming-fishing Husband \n2 White Male Married-civ-spouse Protective-serv Husband \n3 Non-white Male Married-civ-spouse Machine-op-inspct Husband \n5 White Male Never-married Other-service Not-in-family \n\n race sex capital-gain capital-loss hours-per-week \\\n race sex \n0 Non-white Male Non-white Male 0.0 0.0 40.0 \n1 White Male White Male 0.0 0.0 50.0 \n2 White Male White Male 0.0 0.0 40.0 \n3 Non-white Male Non-white Male 7688.0 0.0 40.0 \n5 White Male White Male 0.0 0.0 30.0 \n\n native-country \n race sex \n0 Non-white Male United-States \n1 White Male United-States \n2 White Male United-States \n3 Non-white Male United-States \n5 White Male United-States " + "text/html": [ + "
[text/html output added by this diff -- rendered head of the adult dataset with a (race, sex) MultiIndex; HTML markup lost in extraction, see the text/plain repr that follows]
" + ], + "text/plain": [ + " age workclass education education-num \\\n", + "race sex \n", + "Non-white Male 25.0 Private 11th 7.0 \n", + "White Male 38.0 Private HS-grad 9.0 \n", + " Male 28.0 Local-gov Assoc-acdm 12.0 \n", + "Non-white Male 44.0 Private Some-college 10.0 \n", + "White Male 34.0 Private 10th 6.0 \n", + "\n", + " marital-status occupation relationship race \\\n", + "race sex \n", + "Non-white Male Never-married Machine-op-inspct Own-child Black \n", + "White Male Married-civ-spouse Farming-fishing Husband White \n", + " Male Married-civ-spouse Protective-serv Husband White \n", + "Non-white Male Married-civ-spouse Machine-op-inspct Husband Black \n", + "White Male Never-married Other-service Not-in-family White \n", + "\n", + " sex capital-gain capital-loss hours-per-week \\\n", + "race sex \n", + "Non-white Male Male 0.0 0.0 40.0 \n", + "White Male Male 0.0 0.0 50.0 \n", + " Male Male 0.0 0.0 40.0 \n", + "Non-white Male Male 7688.0 0.0 40.0 \n", + "White Male Male 0.0 0.0 30.0 \n", + "\n", + " native-country \n", + "race sex \n", + "Non-white Male United-States \n", + "White Male United-States \n", + " Male United-States \n", + "Non-white Male United-States \n", + "White Male United-States " + ] }, "execution_count": 2, "metadata": {}, @@ -113,7 +285,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 23, "metadata": {}, "outputs": [], "source": [ @@ -130,15 +302,267 @@ }, { "cell_type": "code", - "execution_count": 6, + "execution_count": 24, "metadata": {}, "outputs": [ { "data": { - "text/html": "
[text/html output removed by this diff -- rendered head of the one-hot encoded X_train with unnamed numeric columns (5 rows x 100 columns); HTML markup lost in extraction, see the text/plain repr that follows]
", - "text/plain": " 0 1 2 3 4 5 6 7 8 9 ... 90 \\\n race sex ... \n30149 1 1 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 \n12028 1 0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 \n36374 1 1 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 \n8055 1 1 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 ... 0.0 \n38108 1 1 0.0 0.0 1.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 ... 0.0 \n\n 91 92 93 94 95 96 97 98 99 \n race sex \n30149 1 1 0.0 1.0 0.0 0.0 58.0 11.0 0.0 0.0 42.0 \n12028 1 0 0.0 0.0 0.0 0.0 51.0 12.0 0.0 0.0 30.0 \n36374 1 1 0.0 1.0 0.0 0.0 26.0 14.0 0.0 1887.0 40.0 \n8055 1 1 0.0 0.0 0.0 0.0 44.0 3.0 0.0 0.0 40.0 \n38108 1 1 0.0 1.0 0.0 0.0 33.0 6.0 0.0 0.0 40.0 \n\n[5 rows x 100 columns]" + "text/html": [ + "
[text/html output added by this diff -- rendered head of the one-hot encoded X_train with named columns such as workclass_Federal-gov ... hours-per-week (5 rows x 103 columns); HTML markup lost in extraction, see the text/plain repr that follows]
" + ], + "text/plain": [ + " workclass_Federal-gov workclass_Local-gov workclass_Private \\\n", + "race sex \n", + "1 1 0.0 0.0 0.0 \n", + " 0 0.0 0.0 0.0 \n", + " 1 0.0 0.0 1.0 \n", + " 1 0.0 0.0 1.0 \n", + " 1 0.0 0.0 1.0 \n", + "\n", + " workclass_Self-emp-inc workclass_Self-emp-not-inc \\\n", + "race sex \n", + "1 1 0.0 1.0 \n", + " 0 0.0 1.0 \n", + " 1 0.0 0.0 \n", + " 1 0.0 0.0 \n", + " 1 0.0 0.0 \n", + "\n", + " workclass_State-gov workclass_Without-pay education_10th \\\n", + "race sex \n", + "1 1 0.0 0.0 0.0 \n", + " 0 0.0 0.0 0.0 \n", + " 1 0.0 0.0 0.0 \n", + " 1 0.0 0.0 0.0 \n", + " 1 0.0 0.0 1.0 \n", + "\n", + " education_11th education_12th ... native-country_Thailand \\\n", + "race sex ... \n", + "1 1 0.0 0.0 ... 0.0 \n", + " 0 0.0 0.0 ... 0.0 \n", + " 1 0.0 0.0 ... 0.0 \n", + " 1 0.0 0.0 ... 0.0 \n", + " 1 0.0 0.0 ... 0.0 \n", + "\n", + " native-country_Trinadad&Tobago native-country_United-States \\\n", + "race sex \n", + "1 1 0.0 1.0 \n", + " 0 0.0 0.0 \n", + " 1 0.0 1.0 \n", + " 1 0.0 0.0 \n", + " 1 0.0 1.0 \n", + "\n", + " native-country_Vietnam native-country_Yugoslavia age \\\n", + "race sex \n", + "1 1 0.0 0.0 58.0 \n", + " 0 0.0 0.0 51.0 \n", + " 1 0.0 0.0 26.0 \n", + " 1 0.0 0.0 44.0 \n", + " 1 0.0 0.0 33.0 \n", + "\n", + " education-num capital-gain capital-loss hours-per-week \n", + "race sex \n", + "1 1 11.0 0.0 0.0 42.0 \n", + " 0 12.0 0.0 0.0 30.0 \n", + " 1 14.0 0.0 1887.0 40.0 \n", + " 1 3.0 0.0 0.0 40.0 \n", + " 1 6.0 0.0 0.0 40.0 \n", + "\n", + "[5 rows x 103 columns]" + ] }, - "execution_count": 6, + "execution_count": 24, "metadata": {}, "output_type": "execute_result" } @@ -146,9 +570,9 @@ "source": [ "ohe = make_column_transformer(\n", " (OneHotEncoder(sparse=False), X_train.dtypes == 'category'),\n", - " remainder='passthrough')\n", - "X_train = pd.DataFrame(ohe.fit_transform(X_train), index=X_train.index)\n", - "X_test = pd.DataFrame(ohe.transform(X_test), index=X_test.index)\n", + " remainder='passthrough', verbose_feature_names_out=False)\n", + "X_train = pd.DataFrame(ohe.fit_transform(X_train), columns=ohe.get_feature_names_out(), index=X_train.index)\n", + "X_test = pd.DataFrame(ohe.transform(X_test), columns=ohe.get_feature_names_out(), index=X_test.index)\n", "\n", "X_train.head()" ] @@ -167,8 +591,271 @@ "outputs": [ { "data": { - "text/html": "
[text/html output removed by this diff -- rendered head of pd.get_dummies(X) (5 rows x 100 columns); HTML markup lost in extraction, see the text/plain repr that follows]
", - "text/plain": " age education-num capital-gain capital-loss hours-per-week \\\n race sex \n0 0 1 25.0 7.0 0.0 0.0 40.0 \n1 1 1 38.0 9.0 0.0 0.0 50.0 \n2 1 1 28.0 12.0 0.0 0.0 40.0 \n3 0 1 44.0 10.0 7688.0 0.0 40.0 \n5 1 1 34.0 6.0 0.0 0.0 30.0 \n\n workclass_Federal-gov workclass_Local-gov workclass_Private \\\n race sex \n0 0 1 0 0 1 \n1 1 1 0 0 1 \n2 1 1 0 1 0 \n3 0 1 0 0 1 \n5 1 1 0 0 1 \n\n workclass_Self-emp-inc workclass_Self-emp-not-inc ... \\\n race sex ... \n0 0 1 0 0 ... \n1 1 1 0 0 ... \n2 1 1 0 0 ... \n3 0 1 0 0 ... \n5 1 1 0 0 ... \n\n native-country_Portugal native-country_Puerto-Rico \\\n race sex \n0 0 1 0 0 \n1 1 1 0 0 \n2 1 1 0 0 \n3 0 1 0 0 \n5 1 1 0 0 \n\n native-country_Scotland native-country_South \\\n race sex \n0 0 1 0 0 \n1 1 1 0 0 \n2 1 1 0 0 \n3 0 1 0 0 \n5 1 1 0 0 \n\n native-country_Taiwan native-country_Thailand \\\n race sex \n0 0 1 0 0 \n1 1 1 0 0 \n2 1 1 0 0 \n3 0 1 0 0 \n5 1 1 0 0 \n\n native-country_Trinadad&Tobago native-country_United-States \\\n race sex \n0 0 1 0 1 \n1 1 1 0 1 \n2 1 1 0 1 \n3 0 1 0 1 \n5 1 1 0 1 \n\n native-country_Vietnam native-country_Yugoslavia \n race sex \n0 0 1 0 0 \n1 1 1 0 0 \n2 1 1 0 0 \n3 0 1 0 0 \n5 1 1 0 0 \n\n[5 rows x 100 columns]" + "text/html": [ + "
[text/html output added by this diff -- rendered head of pd.get_dummies(X) (5 rows x 103 columns); HTML markup lost in extraction, see the text/plain repr that follows]
" + ], + "text/plain": [ + " age education-num capital-gain capital-loss hours-per-week \\\n", + "race sex \n", + "0 1 25.0 7.0 0.0 0.0 40.0 \n", + "1 1 38.0 9.0 0.0 0.0 50.0 \n", + " 1 28.0 12.0 0.0 0.0 40.0 \n", + "0 1 44.0 10.0 7688.0 0.0 40.0 \n", + "1 1 34.0 6.0 0.0 0.0 30.0 \n", + "\n", + " workclass_Private workclass_Self-emp-not-inc \\\n", + "race sex \n", + "0 1 1 0 \n", + "1 1 1 0 \n", + " 1 0 0 \n", + "0 1 1 0 \n", + "1 1 1 0 \n", + "\n", + " workclass_Self-emp-inc workclass_Federal-gov workclass_Local-gov \\\n", + "race sex \n", + "0 1 0 0 0 \n", + "1 1 0 0 0 \n", + " 1 0 0 1 \n", + "0 1 0 0 0 \n", + "1 1 0 0 0 \n", + "\n", + " ... native-country_Guatemala native-country_Nicaragua \\\n", + "race sex ... \n", + "0 1 ... 0 0 \n", + "1 1 ... 0 0 \n", + " 1 ... 0 0 \n", + "0 1 ... 0 0 \n", + "1 1 ... 0 0 \n", + "\n", + " native-country_Scotland native-country_Thailand \\\n", + "race sex \n", + "0 1 0 0 \n", + "1 1 0 0 \n", + " 1 0 0 \n", + "0 1 0 0 \n", + "1 1 0 0 \n", + "\n", + " native-country_Yugoslavia native-country_El-Salvador \\\n", + "race sex \n", + "0 1 0 0 \n", + "1 1 0 0 \n", + " 1 0 0 \n", + "0 1 0 0 \n", + "1 1 0 0 \n", + "\n", + " native-country_Trinadad&Tobago native-country_Peru \\\n", + "race sex \n", + "0 1 0 0 \n", + "1 1 0 0 \n", + " 1 0 0 \n", + "0 1 0 0 \n", + "1 1 0 0 \n", + "\n", + " native-country_Hong native-country_Holand-Netherlands \n", + "race sex \n", + "0 1 0 0 \n", + "1 1 0 0 \n", + " 1 0 0 \n", + "0 1 0 0 \n", + "1 1 0 0 \n", + "\n", + "[5 rows x 103 columns]" + ] }, "execution_count": 7, "metadata": {}, @@ -176,8 +863,6 @@ } ], "source": [ - "# there is one unused category ('Never-worked') that was dropped during dropna\n", - "X.workclass.cat.remove_unused_categories(inplace=True)\n", "pd.get_dummies(X).head()" ] }, @@ -195,7 +880,15 @@ "outputs": [ { "data": { - "text/plain": " race sex\n30149 1 1 0\n12028 1 0 1\n36374 1 1 1\n8055 1 1 0\n38108 1 1 0\ndtype: int64" + "text/plain": [ + "race sex\n", + "1 1 0\n", + " 0 1\n", + " 1 1\n", + " 1 0\n", + " 1 0\n", + "dtype: int64" + ] }, "execution_count": 8, "metadata": {}, @@ -227,7 +920,9 @@ "outputs": [ { "data": { - "text/plain": "0.8375469890174688" + "text/plain": [ + "0.8455074813886637" + ] }, "execution_count": 9, "metadata": {}, @@ -235,7 +930,7 @@ } ], "source": [ - "y_pred = LogisticRegression(solver='lbfgs').fit(X_train, y_train).predict(X_test)\n", + "y_pred = LogisticRegression(solver='liblinear').fit(X_train, y_train).predict(X_test)\n", "accuracy_score(y_test, y_pred)" ] }, @@ -253,7 +948,9 @@ "outputs": [ { "data": { - "text/plain": "0.2905425926727236" + "text/plain": [ + "0.26889803976599136" + ] }, "execution_count": 10, "metadata": {}, @@ -282,7 +979,9 @@ "outputs": [ { "data": { - "text/plain": "0.09372170954260936" + "text/plain": [ + "0.09875694175767563" + ] }, "execution_count": 11, "metadata": {}, @@ -290,7 +989,39 @@ } ], "source": [ - "average_odds_error(y_test, y_pred, prot_attr='sex')" + "average_odds_error(y_test, y_pred, priv_group=(1, 1))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "In that case, we chose to look at the intersection of all protected attributes (race and sex) and designate a single combination (white males) as privileged.\n", + "\n", + "If we wish to do something more complex, we can pass a custom array of protected attributes, like so (note: this choice of protected groups is just for demonstration):" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "metadata": {}, + "outputs": [ + { + "data": { + 
"text/plain": [ + "0.3844295196608744" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "race = y_test.index.get_level_values('race').to_numpy()\n", + "sex = y_test.index.get_level_values('sex').to_numpy()\n", + "prot_attr = np.where(race ^ sex, 0, 1)\n", + "disparate_impact_ratio(y_test, y_pred, prot_attr=prot_attr)" ] }, { @@ -309,17 +1040,20 @@ }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 13, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", - "text": "0.8279649148669566\n{'estimator__C': 10, 'reweigher__prot_attr': 'sex'}\n" + "text": [ + "0.839979361686445\n", + "{'estimator__C': 1, 'reweigher__prot_attr': 'sex'}\n" + ] } ], "source": [ - "rew = ReweighingMeta(estimator=LogisticRegression(solver='lbfgs'))\n", + "rew = ReweighingMeta(estimator=LogisticRegression(solver='liblinear'))\n", "\n", "params = {'estimator__C': [1, 10], 'reweigher__prot_attr': ['sex']}\n", "\n", @@ -331,14 +1065,16 @@ }, { "cell_type": "code", - "execution_count": 13, + "execution_count": 14, "metadata": {}, "outputs": [ { "data": { - "text/plain": "0.5676803237673037" + "text/plain": [ + "0.5843724951518126" + ] }, - "execution_count": 13, + "execution_count": 14, "metadata": {}, "output_type": "execute_result" } @@ -356,14 +1092,24 @@ }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 15, "metadata": {}, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "2021-11-24 16:59:47.326474: I tensorflow/core/platform/cpu_feature_guard.cc:151] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 FMA\n", + "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n" + ] + }, { "data": { - "text/plain": "0.8399056534237488" + "text/plain": [ + "0.8380629468563426" + ] }, - "execution_count": 14, + "execution_count": 15, "metadata": {}, "output_type": "execute_result" } @@ -376,14 +1122,16 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": 16, "metadata": {}, "outputs": [ { "data": { - "text/plain": "0.060623189820735834" + "text/plain": [ + "0.08330040163726551" + ] }, - "execution_count": 15, + "execution_count": 16, "metadata": {}, "output_type": "execute_result" } @@ -401,7 +1149,7 @@ }, { "cell_type": "code", - "execution_count": 16, + "execution_count": 17, "metadata": {}, "outputs": [], "source": [ @@ -419,21 +1167,23 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 18, "metadata": {}, "outputs": [ { "data": { - "text/plain": "0.8163190093609494" + "text/plain": [ + "0.8199307142330655" + ] }, - "execution_count": 17, + "execution_count": 18, "metadata": {}, "output_type": "execute_result" } ], "source": [ "cal_eq_odds = CalibratedEqualizedOdds('sex', cost_constraint='fnr', random_state=1234567)\n", - "log_reg = LogisticRegression(solver='lbfgs')\n", + "log_reg = LogisticRegression(solver='liblinear')\n", "postproc = PostProcessingMeta(estimator=log_reg, postprocessor=cal_eq_odds, random_state=1234567)\n", "\n", "postproc.fit(X_train, y_train)\n", @@ -442,14 +1192,15 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 19, "metadata": {}, "outputs": [ { "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAfUAAAEKCAYAAAALjMzdAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjEsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy8QZhcZAAAgAElEQVR4nOydd1gU1/7/32fpXTpSBAWWZSk2ghI1dsX8DBZUVG5sMbZ4NUZTboomxniTqxhDjFETG2q+9hi7NyaCSbwxgiJSlqIU6SCydNhlz++P3SULLLBI2QXP63nmWWbmzMxnhoXPnPZ+E0opGAwGg8Fg9Hw46g6AwWAwGAxG58CSOoPBYDAYvQSW1BkMBoPB6CWwpM5gMBgMRi+BJXUGg8FgMHoJLKkzGAwGg9FL6NKkTggJJIQkE0LSCCHvKdn/JSEkVrakEEJKFfYtJISkypaFXRkng8FgMBi9AdJV89QJIVoAUgBMBJAN4A6AeZTSxBbK/xPAYErpEkKIBYBoAH4AKIAYAEMppU+7JFgGg8FgMHoBXVlT9weQRil9RCmtA3AcwLRWys8D8H+ynycD+JlSWiJL5D8DCOzCWBkMBoPB6PFod+G5HQA8VljPBjBMWUFCiDOA/gB+beVYByXHLQOwDACMjIyG8ni8jketJvLygNxcYPBggMNGOjA0lJiYmGJKqbW642AwGMrpyqTeHuYCOE0prW/PQZTSfQD2AYCfnx+Njo7uiti6hddeAy5dAu7eVXckDEbLEEIy1R0Dg8Foma6sE+YAcFJYd5RtU8Zc/N303t5jewXp6UD//uqOgsFgMBg9ma5M6ncAuBNC+hNCdCFN3OebFiKE8ACYA/ifwuZrACYRQswJIeYAJsm29VpYUmcwGAxGR+mypE4pFQNYDWkyTgJwklKaQAjZTAgJUig6F8BxqjAMn1JaAuBTSF8M7gDYLNvWKxGLgcePWVJnMBgMRsfo0j51SullAJebbNvYZP3jFo49AOBAlwWnQTx+DNTXs6TO6L3ExMTYaGtrfw/AG0z0isF4ViQA4sVi8dKhQ4cWKiugKQPlnmvS06WfLKn3cqKjge+/Bz79FLB+vgaQa2trf29nZ+dpbW39lMPhdI04BoPRy5FIJKSoqIifn5//PYAgZWXYG7MGkJEh/WRJvZezbRtw/Digr6/uSNSBt7W1dRlL6AzGs8PhcKi1tbUQ0hYv5WW6MR5GC6SnS+emOzm1XZbRQ0lPB06fBpYvB0xM1B2NOuCwhM5gdBzZ31GLuZsldQ0gPR1wdAR0dNQdCaPL2LkT0NIC1qxRdyQMBqMXw5K6BsCms/VySkqkfenz5wMOzYQRGQwGo9NgSV0DYEm9l7NnD1BVBaxfr+5InnuOHDnShxAy9N69ew0DG5KTk3Xd3d29AODixYsmY8eOdevodYKDg10OHjxoDgAhISHOMTEx+gBgaGg4uCPnvXjxosnPP/9s1N7jHBwcfPLy8lQaGB0eHm65YMGCfu2PTjmjR492Ky4u1gKALVu22AwYMMArKCio/7Fjx8zef/99u866jhyJRILhw4dzS0pKOACgpaU1lMfj8eVLcnKybmdfU86zPrvc3FztUaNGuXdGDGz0u5qprpbqvrOk3kuprQXCw4HAQMDHR93RPPccP37cYsiQIRUREREWgwcPzu2Oa544caJd0roikQg6LfTF/frrrybGxsb1EydOrOyU4LqBqKioNPnP+/fvt75+/XqKq6urSLZJqOp5Wnsuipw8edLMy8ur2sLCQgIAenp6EoFAoNQdVFOwt7cX29raiv773/8aTZo0qUO/W5bU1Uym7M+dJfVeytGjQEEBsGGDuiPRGJYsgVN8PAw785ze3qg6cKCRCVQzhEIh586dO8bXr19PDgoKcv/yyy9VTupisRirVq1yvHHjhhkhhC5cuLD4gw8+KNywYUPfq1ev9qmtreX4+flVHDt2LJPTxJHJ39/fY/v27Y9feumlKgB47bXXnKKiokytra1FZ86ceWRvby/29/f38Pb2rvrrr7+Mg4ODSzw8PGo+//zzviKRiGNubi4+ceLEo6qqKk5ERIQ1h8OhJ0+etNy5c2eWr69vzeLFi51zcnJ0AWDHjh1ZkyZNqszPz9cKDg4eUFBQoDt06NCKliy2T58+bbpx40aH+vp6YmFhIf7f//6Xorj/hx9+MGsah5OTk/jSpUvG69ev7wcAhBDcunVLUFZWphUcHDygoqJCq76+nnz99deZgYGBFQ4ODj7R0dFJ69evt8/OztabMmWKe2hoaLG5uXl9dHS0UURERFZubq62svt466237B89eqSXlZWl5+DgULtp06a8xYsX9xeJREQikeDMmTMPfXx8ahVjPnbsmMXy5cuL2/p9vvHGG45//PGHSV1dHXn99dcL33777eKLFy+afPLJJ/ampqbi5ORkw6CgoBIfH5/q3bt329bW1pIff/zxoZeXV21Lz0XxGi3dk7JnZ25uLpk+fXppRESEZUeTOmt+VzNsOlsvRiIBwsKAQYOAcePUHc1zzw8//NBnzJgxQl9f31pzc3Pxb7/9pvKLRVhYmHVWVpZuYmJiQkpKSuLSpUufAMDbb79dGB8fn5SamppQXV3NOX78uFlr56murub4+flVpqWlJYwYMaL8vffes5fvq6urI/Hx8UmffPJJwcSJEytiY2MFSUlJibNmzSrZvHmznYeHR92CBQuKVqxYUSAQCBIDAwMrli9f7vTWW28VxMfHJ/34448PV6xY4QIA7733nn1AQEBFWlpawowZM0rz8vKaNTnn5uZqr1692uXs2bMPk5OTE8+dO/ewaRllccieh114eHimQCBI/PPPPwXGxsaSAwcOWIwfP14oEAgSk5KSEoYNG1bV5Pln2djYiKKiolI2bdrUSDilpfsAgNTUVP2bN28mX7hwIf3rr7+2XrVqVYFAIEiMi4tL6t+/f13TmGNiYoxHjBjRkBhra2s58qb3iRMnugLAzp07rczMzOrj4+OT7t+/n3T48GFrgUCgCwACgcDgwIEDWampqfGnT5+2TElJ0X/w4EHSq6++WhwWFmbT2nNR5Z6UPTsAGDFiROVff/1lrORr0y5YTV3NyIVnXFzUGgajK7hyBUhKAo4dAwhRdzQaQ1s16q7i5MmTFmvWrCkEgODg4JIjR45YjBo1qqqt4wDg119/NV2xYkWRvPnX1ta2HgCuXLlismPHDruamhpOaWmpNp/Pr0YrTcocDgdLly4tAYAlS5Y8mTlzZkP//bx58xqksNPT03WnT5/uWFRUpFNXV8dxcnKqVXa+P/74wzQ1NdVAvl5RUaElFAo5f/75p8nZs2fTAGDu3LnC5cuXN3PAjIyMNPL39y/n8Xh1ivekSEtxDB8+vGLDhg1Oc+bMKZk3b95TV1dXyfDhwyuXL1/uIhKJOLNmzXr64osvVrf+VNu+DwAIDAwsNTY2pgAQEBBQuX379r7Z2dm6c+fOfdq0lg4AQqFQ29zcXCJfV9b8fv36dVOBQGB4/vx5cwAoLy/XSkxM1NfV1a
U+Pj6Vzs7OIgDo169f7ZQpU4QAMHDgwOqoqCiT1p6LKvek7NkB0ib4wsLCDvf3s5q6mklPB3R1AXv7tssyehjbtknFB2bPVnckzz0FBQVaf/75p8kbb7zh7ODg4LNr1y67CxcumEskkrYPboGqqiqyfv1657Nnzz5MSUlJ/Mc//lFcU1PTrv+pROFlz8TEpCGY1atX91u1alVhSkpK4q5duzJra2uVnpdSirt37yYJBIJEgUCQWFhYGGdmZvbsN9WEluLYunVr/vfff59ZXV3NGTVqFO/evXv6U6ZMqbh582ayg4ND3ZIlS/rv2rXLUtXrtHYfRkZGDfezYsWKkp9++inNwMBAMnXqVPfz5883E33Q0tKi9fWtu3hTSklYWFiW/Ho5OTkPZs6cWQYAenp6DX0VHA4H+vr6VP5zfX09ae25qHJPyp4dIP0+6enpdfh3x5K6mklPB5ydpeIzjF7EnTtAVBSwbh0TINAAjhw5Yj5jxoyS3NzcBzk5OQ/y8/PjHB0d665du6ZSc+f48ePL9u7dayUSScd3FRQUaFVVVXEAwM7OTiwUCjkXLlwwb+s8EokE8lHxhw4dsvT39y9XVq68vFyrX79+Ink5+XYTE5P68vJyLfn6yJEjy/7973/byNdv3bplAADDhw8vlx938uRJ07KyMi00YcyYMZV//fWXibzZuaCgoFmZluJISEjQ8/f3r/7ss8/yfX19K+Pj4/VTUlJ0HR0dRevXry9esGBB0d27d1Xu3mjpPpqSmJio6+npWfvhhx8WTp48uTQ2NrZZuf79+9ckJSXptXa9iRMnCr/99lvr2tpaAgBxcXF6ZWVlKv8Xbum5qHJPyp4dAMTHx+tzuVyVWzdagqUSNcOms/VStm8HzMyApUvVHQkDwKlTpyxmzpz5VHHbtGnTnh49etRClePXrVtX5OjoWMfj8bw8PDz4+/fvt7CysqoPDQ0t8vT09Bo7dix34MCBbQ5wMjAwkPz1119G7u7uXjdv3jT597//naes3AcffJA7b948Vy8vL09LS8uGAVjBwcGlly5d6sPj8fhXr1413rdv3+O7d+8acblcvqurq9euXbusAeDzzz/P/eOPP4zd3Ny8zp49a963b99mfc/29vbi8PDwjBkzZrh5eHjwZ8yYMUDVOP7zn//YuLu7e3G5XL6Ojg6dNWuW8Nq1ayaenp5enp6e/DNnzli88847Bao8WwBo6T6acvToUQsul+vF4/H4SUlJBsuXL3/StMykSZOE//3vf1uVbVy3bl0xj8er8fHx8XR3d/d6/fXXnUUikcp9ZC09F1XuSdmzA4Cff/7ZJDAwUOXZAC1BWhoV2dPw8/Oj0dHR6g6j3VhaSltn9+xRdySMTiM9HXBzk454/+ILdUfTqRBCYiilfu097v79+xkDBw5sdUQyg9EZZGZm6sybN8/l1q1bqeqOpT34+fl5XLlyJc3a2rr1vgMA9+/ftxo4cKCLsn2spq5GysqkYmOspt7LYJKwDIbacHZ2Fi1ZsqRYLj7TE8jNzdVeu3ZtgSoJvS3Y6Hc1wqaz9UKYJCyDoXaWLl36tO1SmoO9vb341VdfLe2Mc/WYN5neCJvO1gthkrAMBkONsKSuRuRJndXUewlMEpbBYKiZLk3qhJBAQkgyISSNEPJeC2XmEEISCSEJhJAfFLbXE0JiZcv5roxTXaSnA0ZGgJWVuiNhdApMEpbBYKiZLutTJ4RoAfgGwEQA2QDuEELOU0oTFcq4A/gXgBGU0qeEEBuFU1RTSgd1VXyagHw6GxMb6wUwSVgGg6EBdGVN3R9AGqX0EaW0DsBxANOalHkdwDeU0qcAQCktxHMEm6Pei5BLwr79NntL02CY9Wrb9DbrVULI0GnTpjX8pxWJRDA3Nx/Y1u/5Wb8LNTU1xM/Pz0MuVNTddOXodwegkcZzNoBhTcpwAYAQ8gcALQAfU0qvyvbpE0KiAYgBfE4pPdeFsXY7lEqTOqvU9RKYJGyPgFmvdj/qtl41MDCQJCcnG1RUVBBjY2P6448/mtra2nZZxtXX16ejR48u+/777y1WrlxZ0vYRnYu6B8ppA3AHMAbAPADfEUL6yPY5y0Qu5gPYSQhxbXowIWQZISSaEBJdVFTUXTF3Ck+eAJWVrKbeK2CSsO1jyRIn+Pt7dOqyZIlTW5eVW68ePHgw48cff1RJSU6OWCzGsmXLHOVKYJ999pkNAGzYsKGvt7e3p7u7u9e8efOclWnJ+/v7e9y8ebNBMvW1115zcnNz8woICODm5uZqy8ssWbLEydvb23PLli22P/zwg5mvry/P09OT/+KLL3IfP36snZycrBsREWG9Z88eW7miXG5urvbkyZNdvb29Pb29vT3/+9//GgFAfn6+1ogRI9zd3Ny8QkJCnFuzXuXz+Z4eHh78gIAAbtP9yuIAgEuXLhnLnc88PT35T58+5WRmZur4+fl58Hg8vru7u9fVq1eNgb9bCebPn99Pbr36ySef2Ci2CLR0H2+99Zb99OnT+w8ZMoQ3c+bM/tHR0fo+Pj6ePB6Pz+Vy+Q8ePGgmB3vs2DGLGTNmNJoeNmHCBOGpU6f6AMD//d//WQQHBzck2xs3bhgOGjSI5+npyR88eDDv/v37zc5ZVlbGmT17touPj4+np6cn/+jRo30AoKV4Zs2aVXr8+PF2fcc6i65M6jkAFP/QHGXbFMkGcJ5SKqKUpgNIgTTJg1KaI/t8BCASQLNmK0rpPkqpH6XUz9paqaqgxsJGvvcimCRsj4BZrzbmebFeBYBXX3215MSJE+ZVVVUkKSnJMCAgoGH/wIEDa+7cuSNISkpK3LRpU84777zj2PSc77//ft+xY8eWPXjwIOm3335L/vDDDx3Lyso4LcXzwgsvVMfFxbW7m6Qz6Mrm9zsA3Akh/SFN5nMhrXUrcg7SGvpBQogVpM3xjwgh5gCqKKW1su0jAPynC2Ptdtgc9V5Cejpw+rR0xLtJq3LTDDkHDjDrVTDrVVXuA+i49SoADBs2rDo7O1vvu+++s5gwYUKj309JSYlWSEhI/4yMDH1CCFWmAR8ZGWl67dq1PuHh4XYAUFtbS9LS0nRbikdbWxs6Ojr06dOnnKaxdDVdVlOnlIoBrAZwDUASgJOU0gRCyGZCSJCs2DUATwghiQBuAHibUvoEgCeAaELIfdn2zxVHzfcGWE29l8AkYXsEzHr12ehN1quBgYGlmzZtclqwYEGjfu53333XYfTo0eWpqakJFy5cSKurq1Nqo3r69Ok0eXx5eXkPhgwZUtNaPCKRiBgaGna7uUqX9qlTSi9TSrmUUldK6WeybRsppedlP1NK6VuUUj6l1IdSely2/ZZsfaDsc39XxqkO0tMBCwvA1FTdkTCeGSYJ22Ng1qvMenXlypXFGzZsyPX392/UglBWVqbl6OhYBwB79+5VqhoyduzYsrCwMFv5S+Aff/xh0Fo8+fn5Wn369BErerN3F+oeKPfcwqaz9QKYJGyPgVmvMutVV1dX0Ycffths2vS7776b//HHHzt6enryxWKlLqr4/PPPc8ViMeHxeHw3NzevD
z/80KG1eK5cuWLatJm/u2DWq2qCywUGDgROnVJ3JIxnorYWcHYGBg+WzlF/TmDWqwxNRxOsVydNmuS6ffv2bF9fX6VjIToKs17VMCQSIDOT1dR7NEwSlsHQSNRtvVpTU0OCgoJKuyqhtwWzXlUDeXlAXR1L6j0WJgnLYGg06rRe1dfXp6tXr27WLdBdsKSuBth0th6OXBL22DEmCctgMDQK1vyuBth0th4Ok4RlMBgaCkvqaoDV1HswTBKWwWBoMCypq4H0dKBvX0Bfv+2yDA2DScJ2CxIJ8MsvMIqIQJ9ffoFRBzRiGnj48KHO+PHjXZ2dnb2dnJy8Fy9e7FRTU6O0/yQjI0MnMDCw2RSvpig6kLWXt956y37jxo22qpbvqMObIv/5z3+s5eIw9+7d05druCckJOgNHjyY19HzBwYGDkhMTNQFpNrvXC6XL9eKfxaXOVXpqc5qnQlL6mqAzVHvocglYZcvZ5KwXciJEzCzt4dvUBC4q1bB5ZVXwLW3h++JE2hVV701JBIJpk+f7hYUFFSamZkZn56eHl9ZWclZu3ZtM9UgkUgEFxcX0dWrVx+1dd6oqKg0Kyur5vJlGs4777xTJB/MderUqT5BQUFPk5KSEr28vGrv3bsnUPU8EokETdXboqOj9evr6wmfz2+YGx8VFZUiV2PTRIc5RWc1dcfSUVhSVwMZGSyp90iYJGyXc+IEzBYuxICCAuhUVYFTWQmt6mpwCgqgs3AhBjxrYr9w4YKJnp6eZO3atU8AqTb3nj17Hp84ccKqvLycEx4ebjlu3Di34cOHc1988UUPRY/18vJyzssvvzzA1dXVa+LEia6+vr48ueua3IEsOTlZd8CAAV5z5851dnNz8xoxYoR7RUUFAYCwsDArb29vTw8PD/7kyZNdy8vLW/2/+/jxY+2JEye6enh48D08PJrVbIVCIScgIIDL5/M9uVxug2NYWVkZZ8yYMW4eHh58d3d3r++++84cAFatWuXg6urqxeVy+cuWLXME/m4lOHHihNm+fftsDx06ZD1s2DAu0LhF4KOPPrL19vb25HK5/HXr1tkDUv95FxcX7xkzZrhwuVyvhw8fNjKLOXTokOUrr7zSyCVNGS2du3///l7BwcEuLi4u3kFBQf3PnTtnMmTIEJ6zs7P3jRs3DIHe56zWmbDR792MSAQ8fsySeo+DScJ2ORIJsGYNnGtrlVc2amvBWbsWzrNnI47TzurIgwcPDAYOHNjIvMXCwkLSt2/fusTERD0ASEhIMIyLi0uwtbWtT05ObkhU27Zts+7Tp0/9w4cPE+7cuaMfEBDgpewaWVlZ+kePHn304osvZr788ssDIiIizFetWlUSGhr6dP369cUAsGbNGvvw8HCrDz74oJmymZwVK1b0GzVqVPnGjRsfisViCIXCRs37hoaGkkuXLqVZWFhI8vLytIcNG8abP39+6dmzZ03t7OxEkZGRaQDw5MkTrfz8fK3Lly+bP3r0KJ7D4aBpV0FISIjw9u3bRcbGxvWbN29upAB39uxZ07S0NP24uLgkSikmTJjgduXKFeMBAwbUZWVl6e3fvz99/PjxGU3jv337tnFTffXRo0dzORwOdHV1JXFxcYLWzv348WP9EydOPBo6dGiGr6+v57Fjxyyjo6MFP/zwQ5/PPvus79ixYx/KndV0dHRw7tw5k3feecfx2rVrjVzm5M5qp06dyiguLtby8/PzDAoKKpM7q61cubKkpqaGyFXk1Oms1pmwpN7NPH4s/efFBsn1MJgkbJdz4waMKirQav90eTm0IiNhNG4cOr0Jd9SoUWXKnMpu3bplvHbt2kIAeOGFF2q4XK5SZzcHB4dauTPZ4MGDqzIyMvQAICYmxmDjxo0O5eXlWpWVlVqjR49uVT701q1bJqdPn04HpC0KlpaWjWKSSCTkzTffdPzzzz+NORwOCgsLdbOzs7WHDBlS/cEHHzitXLnSYdq0acLAwMAKkUgEPT09SUhIiMvUqVNLQ0JCVJYuvXr1qunNmzdN+Xw+HwCqqqo4AoFAf8CAAXV9+/atGz9+vNLfQVFRkY6dnV2jzumoqKiUvn37NmiwtnZuBweHWrk+O5fLrR43blwZh8PBkCFDqrZs2WIP9D5ntc6ENb93M2w6Ww+kthYIDwcCAwEfH3VH02vJyYEOIWhVt5oQ0OxstHvagbe3d/X9+/cbGYyUlJRw8vLydPl8fi0grQG397yK6OrqNsSupaVFxWIxAYBly5b137VrV1ZKSkriu+++m9uS45qq7N271+LJkyfaDx48SBIIBImWlpai6upqjq+vb+3du3cTfXx8qj/66COHDRs29NXR0UFsbGzSrFmznl68eLHPmDFj3FW9DqUUb775Zp68LzwrKyt+3bp1xUDrz0pPT09SXV3d6j22dm7F58jhcKCvr08BQEtLC/X19QTofc5qnQlL6t0MS+o9ECYJ2y04OEAkkaBVNR9KQRwd0e4hykFBQeU1NTUc+YhvsViMVatWOc2ePbtY0fJUGQEBARXHjx83B4CYmBj9lJQUpQ5iLVFVVcXp16+fqLa2lqjSZztixIjybdu2WcvjfPLkSaPWC6FQqGVlZSXS09OjFy5cMMnNzdUFpCP2TUxMJKtWrSp566238mNjYw2FQiFHVqsV7tmz57FAIFDZOW3KlCllR44csZL7mqenp+vk5OS02brr7u6u1CWtM84tp7c5q3UmrPm9m0lPl461cnJSdyQMlWCSsN3G2LGoNDFBfXV1y5UNExPUjxnT/qZ3DoeDc+fOpS1btsx527ZtfSUSCcaNGycMDw/PaevYt99+u2jOnDkurq6uXq6urjVubm415ubmKo94f++993L9/f09LSwsxEOGDKmoqKhotYvh22+/zVq0aJEzl8u14nA42LVrV+aECRMa7nnp0qUlU6ZMceNyuXxfX9+q/v371wDSZv5//etfjhwOB9ra2nT37t2ZpaWlWlOnTnWrra0lAPDpp58+VjXumTNnliUkJOi/8MILPEBaOz927Fi6trZ2q0lvypQppb/++qvJ9OnTldrKduTcct599938pUuX9v/iiy/sJ06cqHRQ3ueff567bNmyfjwejy+RSIiTk1PtjRs30o4ePWpx8uRJS21tbWptbS369NNP8wD1Oqt1JsylrZuZPx/43//+rrEzNJxLl4CpU6WSsPPnqzsatdPVLm3y0e/KBsvp6UFy+DAehYSgW//xisVi1NXVEUNDQ5qQkKA3adIk7sOHD+PlzcKMxlRUVJARI0Z4xMTECLS1e069saud1TqT1lzaes4T7yWw6Ww9DCYJ263IEvajtWvhXF4OLUJAKQUxMUH9V18hs7sTOiCd0jZq1CgPkUhEKKX48ssvM1lCbxljY2O6cePG3PT0dF13d/dmPu6aiLqd1ToTltS7mfR04OWX1R0FQyXkkrA7djBJ2G4kJATC2bMRFxkJo+xs6Dg6QjRmDCrbO42tszA3N5fEx8cnqefqPZPg4OAydcfQHtTtrNaZsKTejVRXA/n5bDpbjyEsjEnCqgkOB+iKaWsMRm+nS999
CSGBhJBkQkgaIeS9FsrMIYQkEkISCCE/KGxfSAhJlS0LuzLO7iIjQ/rJmt97AOnpwKlTTBKWwWD0KLqspk4I0QLwDYCJALIB3CGEnKeUJiqUcQfwLwAjKKVPCSE2su0WADYB8ANAAcTIjlWb8X1nwKaz9SCYJCyDweiBdGVN3R9AGqX0EaW0DsBxANOalHkdwDfyZE0plUsnTgbwM6W0RLbvZwCBXRhrt8CSeg+BScIyGIweSlcmdQcAinMis2XbFOEC4BJC/iCE/EkICWzHsSCELCOERBNCoouKijox9K4hPR3Q0wPs7NQdCaNVmCSs+pF6rxohIqIPfvnFCJ3gvcqsV/+mu61Xhw4d6qG4n8fj8eWGOS2haKrTXl588UVuUVHRM/1eejrqVpTTBuAOYAyAeQC+I4T0UfVgSuk+SqkfpdTP2tq6i0LsPDIypIPk1DWKl5SXMIYAACAASURBVKECTBJW/Zw4YQZ7e18EBXGxapULXnmFC3t7X5w4waxXO4nutl6trKzUSktL0wGAu3fv6nfSbbTIvHnznmzfvl3zk0IX0JXpJQeAom6ao2ybItkAzlNKRZTSdAApkCZ5VY7tcTAf9R4Ak4RVLydOmGHhwgEoKNBBVRUHlZVaqK7moKBABwsXDnjWxM6sV9VrvTp9+vSSiIgICwCIiIiwCA4ObnBxS05O1h06dKgHn8/35PP5nk3vF5AKAC1fvtxRHsu2bdusACAzM1PHz8/PQ17zv3r1qjEAzJ07t/Ts2bOWrT3n3kpXJvU7ANwJIf0JIboA5gI436TMOUhr6SCEWEHaHP8IwDUAkwgh5oQQcwCTZNt6NCypazhMEla9SL1XndGS4UltLQdr1zo/S1O8qtarP/3008M7d+4kK5ZTtF7dunVrTmJiolJ7zqysLP01a9YUpqWlJZiZmdVHRESYA0BoaOjT+Pj4pOTk5EQPD4/q8PBwpVrlcuTWq8nJyYkJCQmJQ4YMqVHcL7deTUxMTIqKikp5//33HSUSCeTWq8nJyYmpqakJM2fOLJNbr6ampiakpKQkbt26NU/xXCEhIcIFCxYUrVixouD27dspivsU7VGTkpISY2NjDa9cuWIsu1e91atXF6WlpSVwudxGAjO3b982Hj58eKNnPW/evKcXLlwwB4Br1671mTlzZkPSt7e3F//2228piYmJSSdOnHi0bt26fk2fyc6dO63MzMzq4+Pjk+7fv590+PBha4FAoHvgwAGL8ePHCwUCQWJSUlLCsGHDqgDA2tq6vq6ujuTn5z93TfBdNvqdUiomhKyGNBlrAThAKU0ghGwGEE0pPY+/k3cigHoAb1NKnwAAIeRTSF8MAGAzpbSk+VV6DkIh8PQpm6Ou0Vy5AiQlSSVhSau+Ioyu4MYNI7Shi47yci1ERhph3DhmvdqDrFdtbGzqzczMxPv27TN3c3OrNjY2bngzq6urI6+99ppzYmKiAYfDQWZmZjMzmOvXr5sKBALD8+fPmwNAeXm5VmJiov7w4cMrly9f7iISiTizZs16Kn/+AGBpaSnOysrStbOzq256vt5Ml/buUkovU0q5lFJXSulnsm0bZQkdVMpblFI+pdSHUnpc4dgDlFI32XKwK+PsDtjI9x4Ak4RVLzk5OiCkdflVQiiys5n1ag+0Xp01a9bTd955x3nevHmNKmifffaZrY2NjSgpKSnxwYMHiSKRSJmNKgkLC8uSx5KTk/Ng5syZZVOmTKm4efNmsoODQ92SJUv6ywf/AVL/9I7+TnsibMhWN8GSuoYjl4Rdt45JwqoLBwcRJJLWm0goJXB0ZNarPdB6NTQ09Okbb7yRP3PmzEYSskKhUKtv374iLS0t7N6927LpwDsAmDhxovDbb7+1lrvNxcXF6ZWVlXFSUlJ0HR0dRevXry9esGBB0d27dw0B6QC+oqIiHQ8Pjx6v5d5emExsN8GSuobDJGHVz9ixlTAxqYeSWl4DJib1GDOGWa/2QOtVc3NzyWeffZbftPybb75ZGBwc7Hr8+HHLcePGCQ0MDJq9ZK1bt644IyNDz8fHx5NSSiwsLESXL19+eO3aNZPw8HA7bW1tamhoWH/s2LF0APj9998NBw8eXKnzHL6gM+vVbmLNGuDQIWnfOuuu1TDS0wE3N+mI9y++UHc0Gk1XW682jH5X1kStpyfB4cOP0I5+4c6AWa+2D02wXl28eLHT9OnTS6dNm9aip3tPhlmvagDyke8soWsgTBJWc5Am7EdYu9YZ5eVaIISCUgITk3p89VVmdyd0gFmvthdNsF719vau7q0JvS1YUu8m5JVBhobBJGE1j5AQIWbPjkNkpBGys3Xg6CjCmDGV6lJtYtar7Ufd1qvr169vu1Wol8KSejdAqTSpT5ig7kgYzWCSsJqJ1HuVWa8yGO2EjX7vBoqKpHmDDZLTMJgkrMYjFqs7AgajZ8GSejfARr5rKEwSVqO5dw/6FhYYdP8+mk2PYjAYymFJvRtgSV0DYZKwGo1EAixeDJeKCmgtWgSXTjBpYzCeC1hS7wYyMqSfLKlrEHJJ2LffZlMSNJDDh2GekgIDSoHkZBhGREBl98aW0NLSGio3/pgyZcqAtoxVlLF582abZzmuJ9Beq9Pg4GCXgwcPmnfGtZta3b7yyiv9uVwu/5NPPrF588037c+dO2fSkfMfOXKkz4YNG/oCUjMbGxsbXx6Px+fxePxVq1Z16QhZuelPe49btmyZ4/nz59t932ygXDeQng5YWQHGxuqOhNEAk4TVWIRCcNatQ7/qammlo7oanDffhPPMmSgzNcUz19n19PQkAoEgEQCCgoL6h4WFWX/88ccF7TnH3r17bV9//fWStlToWkIikYBSCi2t585npFUUrW6zsrK079+/b5SVlRX/LOcSiURoKjqzY8cOu8uXL6fJ11esWFGwefPmdv3uu5sNGzYULl682DkoKKhdU/N65RunpsHc2TQMJgmr0WzYAPuamsb/m2pqwFm/HvaddY2RI0dWpKWl6QHAxx9/bOvu7u7l7u7utXnzZhtAuY3pli1bbAoLC3VGjx7NlduUKhIeHm45fvx4V39/fw9nZ2fv9evX9wWUW5Xu3bvXgsvl8t3d3b1WrlzZUFM8ffq0KZ/P9/Tw8OAHBARw5bHMnj3bxcfHx9PT07PBajU6Olrfx8fHk8fj8blcLv/Bgwd6Ldmv/vbbb4YvvPCCh5eXl+fIkSPdMzMzdeTb5RavO3bssGnpeX3wwQd2XC6X7+HhobRmu2HDhr7e3t6e7u7uXvPmzXOWyPpLtmzZYiO3fZ06deoAALh06ZKxvJbs6enJf/r0KUexlWDChAncwsJCXR6Px7969aqxYotAS/fh7+/vsWTJEidvb2/PLVu22CrGFhcXp6erqyvp27dvq8MuWzv3a6+95uTt7e05YMAAr6ioKMNJkya5Ojs7e69Zs6bhOzlhwgRXLy8vTzc3N6/t27crdeLbvXu3hfx3Nn/+fGexWAyxWIzg4GAXd3d3L3nrBABwudy60tJS7aysrPZVvimlLS6QuqvdaK2MpixDhw6lmoqbG6WzZ6s
7CkYDISGUmplRWlam7kh6HJA6LLb77zM2NjaDUhrd1nL3Lo3X06MS6UTQxoueHpXExtIHqpxH2WJgYFBPKY2uq6uLHjdu3NPPP/888+bNm4nu7u5VQqHwbmlp6V1XV9fq33//PeHgwYNpISEhRfJji4uL71FKo+3t7Wtzc3NjlZ3/q6++SreysqrLy8u7V15eHuPm5lYdFRWVKBAI4ggh9Pr160mU0uj09PT7dnZ2tTk5ObF1dXXRw4YNK4uIiEjLycmJtbW1rUtKSoqjlEbn5+ffo5RGv/HGG3nffPPNI0ppdFFR0T1nZ+caoVB4d8GCBQW7d+9+RCmNrq6ujikvL49RFndNTU3MoEGDKnJycmIppdH79u17OGvWrGJKabS7u3vV5cuXBZTS6GXLluW7ublVN72vEydOpAwaNKiirKzsrmJcM2fOLD5w4MBDxW2U0uhp06Y9OXbsWCqlNNra2rquqqoqRh47pTR67NixpdeuXUuilEaXlpberaurixYIBHHyayv+rHid1u7jhRdeKA8NDS1U9nvZuXNn+tKlS/Pl6+vWrcu1trau8/DwqPLw8Kg6ffp0SlvnXrFiRR6lNHrz5s1Z1tbWdRkZGferqqpibGxs6vLy8u4pPgP5716+Xf6diYmJiR87dmxpTU1NDKU0OjQ0tPDrr79Ov3nzZmJAQIBQHp/8OVFKo0NCQooOHjyY1vSeZH9PSv/WWn0DoJTWE0IkhBAzSmm3Kzn1BurrgcxMYOZMdUfCACBtNjl1Sjri3aRD3XSMTkY+OE7Ugl2LSAQsWgSXmBgkP4sOTW1tLYfH4/EBYNiwYeVr164t3rZtm/XLL79campqKgGA//f//t/TGzdumAQFBQmb2piqco2RI0eW2dnZ1cvPFRkZaRwSElKqaFX6+++/Gw0fPrzc3t5eDAAhISElUVFRxlpaWtTf37+cx+PVAYDcBjYyMtL02rVrfcLDw+1k90HS0tJ0AwICKrdv3943Oztbd+7cuU99fHxqldmv3rlzRz81NdVg3LhxXOlzlsDa2lpUXFysVV5erjVlypQKAFiyZMmTX3/91azpPf3888+m//jHPxqMb5TZ0165csVkx44ddjU1NZzS0lJtPp9fDUDo4eFRPWPGjP5BQUGloaGhpQAwfPjwig0bNjjNmTOnZN68eU9dXV1V6sqIi4vTU3Yf8v1N3d/k5OXl6VhbWzeqpTdtfm/pGcn3z5gxoxQABg4cWO3m5lbt7OwsAgAnJ6faR48e6drZ2VV/8cUXtpcuXeoDAPn5+ToJCQn6dnZ2DVoLV69eNYmPjzccOHCgJwDU1NRwbGxsxCEhIaWPHz/WW7hwodMrr7winDFjRoNwj7W1tTgnJ0dXlecjR5VqfQWAB4SQnwE0BEgpZZqaKpCbK/1nxJrfNQQmCauxJCZCLz4eRi2NdJdIQB48gHFSEvS8vNBu9y3FPvW2kNuYnjlzxuyjjz5yuH79etn27dvzFMtERET02bp1qz0A7Nu3LwMASJNBl/L1jliAUkpx+vTptIEDBza65yFDhtSMGjWq8scffzSbOnWq+9dff50ZFBRU3jTuOXPmlLq5uVXHxsYKFI8vLi7ulI79qqoqsn79eufbt28nurm5id566y37mpoaDgDcuHEj9cqVKyY//fST2fbt2/smJycnbN26NX/69OnCn376yWzUqFG8S5cuparyfCilRNl9yGlpnIOBgYFEKBS2VYFt9dxyWWAOhwM9Pb0GiWAOhwOxWEwuXrxoEhUVZRIdHS0wMTGR+Pv7ezS1n6WUktmzZz/55ptvmpkIxcfHJ/7444+me/bssT5x4oTFqVOnMgCgpqaGKDO4aQ1V3nfPAvgIwE0AMQoLQwXYdDYNgknCajR8Pmq9vVHJ4UCprjqHA+rjgwpPz/Yn9JYYO3ZsxeXLl/uUl5dzysrKOJcvXzYfO3ZsuTIbUwAwMjKql1uRLliwoFTu7/3SSy9VAcDvv/9uWlBQoFVRUUEuX77cZ/To0c1q+KNGjaq8ffu2SV5enrZYLMapU6csxowZUzFmzJjKv/76y0QgEOgCQEFBgZYsxrKwsDBbeT/1H3/8YQAAiYmJup6enrUffvhh4eTJk0tjY2MNlMXt6+tbU1JSon39+nUjQFrTj46O1reysqo3MTGpv3btmjEAHDp0SKkt7OTJk8uOHj1qJR/1L49LTlVVFQcA7OzsxEKhkHPhwgVzAKivr8fDhw91X3nllfJvvvkmp6KiQksoFGolJCTo+fv7V3/22Wf5vr6+lfHx8fqq/K5auo+2jvPy8qp5+PBhq1oHz3puOaWlpVpmZmb1JiYmknv37unfv3/fqGmZwMDAsosXL5rL7WsLCgq0UlJSdPPy8rTr6+uxaNGi0n//+985Dx48aLDHffjwof7AgQOrVY0DUKGmTik93J4TMhrDprNpEEwSVqPhcICDB5EREAB+rZK0raMDHDqEjM6UgB85cmTV/PnznwwZMsQTAF599dWiESNGVJ85c8a0qY0pACxcuLA4MDCQa2trW3f79u2Upufz9fWtDAoKcs3Pz9edNWvWk5deeqkqOTm5UfOps7OzaNOmTTmjR4/mUkrJhAkTSv/xj3+UAkB4eHjGjBkz3CQSCSwtLUW3bt1K/fzzz3OXLVvWj8fj8SUSCXFycqq9ceNG2tGjRy1Onjxpqa2tTa2trUWffvpp3u+//27UNG59fX16/Pjxh2vWrOlXXl6uVV9fT1auXFng5+dXs3///oylS5e6EEIwZswYpXrts2bNKrt7967hoEGDPHV0dOiECROEu3btaqhtWllZ1YeGhhZ5enp6WVtbiwcOHFgJAGKxmMyfP79/eXm5FqWULF26tNDKyqp+/fr19rdu3TIlhFAPD4/qWbNmCbOystocsdrafbR23OTJkyvee+89J4lEAk4LX55nPbec4OBg4b59+6wHDBjgNWDAgBr5M1Bk6NChNR9++GHO+PHjuRKJBDo6OjQ8PDzL0NBQ8tprr7lIJBICAJs3b84GpC8WGRkZei+99FK75JLbtF4lhIwA8DEAZ0hfAggASikd0Npx3Y2mWq9+8ol0qa4G9JgulvqorQWcnYHBg6Vz1BnPRJdbrwJ4/XU4HjkC69rav1sS9fQgefVVFH33HbLbe+3uIjw83DI6OtooIiIiS92xMBqzePFip2nTppU29XjXZCIiIvrExMQYfvXVV7lN97VmvarKO+9+ADsAjATwAgA/2WebEEICCSHJhJA0Qsh7SvYvIoQUEUJiZctShX31CtvPq3I9TSQ9HbC3Zwld7TBJ2B5DWBhy9fUbz0fX14ckLAzN/rkxGKqwefPmvMrKyh41hVssFpOPPvqo3XPpVamp36aUDmv3iQnRApACYCKAbAB3AMyjlCYqlFkEwI9SulrJ8RWUUpXlWjS1pj56tHQE/O+/qzuS5xiJBPD2lr5Z3b3LFOQ6QHfU1AHg4EGYv/EGXKqrwTEwgGT3bqQvWoTS9l6XweiNdLSmfoMQso0QEkAIGSJfVDjOH0AapfQRpbQOwHEA01QPu3fAhGc0AC
YJ2+NYuBBPuVxUEwJ4eKBqwQKW0BkMVVBlSpu8lq74dk4BtOWC4QDgscJ6tsK5FAkmhLwEaa1+HaVUfow+ISQagBjA55TSc00PJIQsA7AMAPr169fWfXQ7dXVAdjZL6mqHScL2OOSD5sa+JOYdOqTdqYPjGIzeTIt/KoSQtbIfP6KUjm2ydJat1QUALpRSXwA/A1Acae8sa+abD2AnIcS16cGU0n2UUj9KqZ+1tXUnhdR5ZGVJtbBYUlcjTBK2xzIY9/CUWGAg7qs7FAajx9Da++9i2Wf4M547B4CTwrqjbFsDlNInlFL55JXvAQxV2Jcj+3wEIBLA4GeMQ22w6WwaQFgYYGYGLF3adlmG5iCVl3MhFRVaWLTIBcx7lcFQidaSehIhJBWAByEkTmF5QAiJU+HcdwC4E0L6E0J0AcwF0GgUOyGkr8JqEIAk2XZzQoie7GcrACMAqKQEpUkw4Rk1I5eEXb6cScL2NA4fNkdKigGk3quGiIhg1qtdzPNkvUoIGRofH98wJ2nz5s02hJChN2/eNGz5LFJzl7bKKGPr1q3WO3futGx/5O2nxS8npXQegFEA0gC8orBMlX22CqVUDGA1gGuQJuuTlNIEQshmQkiQrNgaQkgCIeQ+gDUAFsm2ewKIlm2/AWmfeo9M6tragKOjuiN5TmGSsD0ToZCDdev6QS6zWV3NwZtvOqOsrEPJVC4Tm5qamqCjo0PDwsLa3We3d+9e24qKimeOQyKRoL6+mXT6c48y69WUlJTETZs2Fe7cuTO3PfPLRUrMA3bs2GG3fv36Ivm6u7t7dURERIOC3rlz5yzc3NxUEpp5Fv75z38+2bt3r23bJTtOq19OSmk+pXQgpTSz6aLKySmllymlXEqpK6X0M9m2jZTS87Kf/0Up9ZJdYyylVCDbfotS6iPb7kMp3d/RG1UH6elAv37SvMLoZpgkbM9lwwZ7yLTDG6ip4WD9ema9yqxXO8V69eWXXy69fPlyHwBISEjQMzExEZubmzfsDw0N7eft7e3p5ubmtW7dOqXfu7Nnz5oOGjSIx+fzPadMmTJALh+8atUqB/k9L1u2zBGQ6tI7OjrW3rhxo921/PbSPp9WRrtITwdcXNQdxXMKk4Ttmdy7p48jR2xQW9t47mFtLQdHjthg9eoiNDE2aS8ikQjXrl0znTRpUtlvv/1m+MMPP1jGxMQkUamFs+f48ePLU1NT9ezs7ESRkZFpAPDkyRMtS0vL+m+//dY2KioqpSVv7ri4OKMHDx4kGBsbSwYPHsyfNm2a0NbWVpyVlaW3f//+9PHjx2dkZGTofPzxxw4xMTFJ1tbW4lGjRnGPHDnSZ/z48RWrV692iYyMFPB4vDq5xvr777/fd+zYsWWnTp3KKC4u1vLz8/MMCgoq+/rrr61XrVpVsHLlypKamhoiFotx+vRps6Zx19bWkjVr1vS7dOlSmr29vfi7774z37Bhg8OpU6cyXnvtNZevvvoqa8qUKRXLly9X2qZ48uRJ08uXL/eJiYkRmJiYSJpqvwPA22+/XSg3vJk+fXr/48ePm82fP18YHh5ul5mZ+cDAwIDKDWTCwsLswsPDMydNmlQpFAo5hoaGksLCwoZzXbhwIW3q1KnucvOd7777zgqQyqa2dB8AUFdXR+Lj45Oaxnbjxg1jX1/fKsVtpqam9fb29nV37tzRP336dJ9Zs2Y9PXLkSIMH+o4dO3JsbW3rxWIxXnzxRY/bt28bDBs2rEGDPS8vT3vr1q19b968mWJqair54IMP7D799FPbDRs2FF6+fNn80aNH8RwOp5FpzpAhQyojIyNNxo4d2yiWzqZX9g1pCmyOupqorQXCw4HAQMDHR93RMFRFNjgOrXuvPvOgObn1qo+PD9/R0bFu7dq1xZGRkcZy61UzMzOJ3Hp1yJAh1b/99pvpypUrHa5evWpsaWmpUpu53HrV2NiYyq1XAaAl61UdHZ0G69XIyEijlqxXv/zyy748Ho8/cuRID0Xr1bCwsL4ffPCBXWpqqq6xsTFVFreiZSmPx+Nv27atb25uro4y61Vl96Sq9aqvry+Py+Xyb926ZRIfH28AAHLr1d27d1vo6OhQ4G/r1S1bttgUFxdr6ag4K6Wl+5Dvb4/1KgDMmTOn5MiRIxaXLl0yDw0Nfaq47/DhwxZ8Pt+Tz+fzU1NT9e/fv9/I3CUyMtLo4cOH+v7+/jwej8c/fvy4ZVZWlq6lpWW9np6eJCQkxOXw4cN9jI2NG76sNjY2YsV4uwpWU+8iKiuBwkKW1NUCk4TtmSQm6iE+3qjFpC2REDx4YIykJD14eTHrVWa92oj2Wq+GhIQIN27c6Ojj41NlYWHRcKxAINDdtWuXrawlpT44ONilpkl3EKUUI0eOLLtw4UJ60/PGxsYmnT9/3vT06dPm3377rc2ff/6ZAkj909tro/ostDZP/QIh5HxLS1cH1tPJlI06YEm9m5FIpNPYBg0CxnWWnAKjW+Dza+HtXQkOR7l2NYdD4eNTAU9PZr0KZr3aUetVExMTyccff5z90UcfNXpZe/r0qZaBgYHEwsKi/vHjx9qRkZFmTY8dM2ZMZXR0tLF8BH1ZWRknLi5OTygUckpKSrRCQkKEe/bseSwQCBr60FNSUvS8vb3bZaP6LLRWU98u+5wJwA7AUdn6PADtFpl/3mDT2dSEXBL22DEmCdvTkMrIZSAggI+WvVcz0Inycsx69fm2Xl22bNnTpuUDAgKqvb29q1xdXb379u1bN3To0GYvZvb29uK9e/dmzJ07d0BdXR0BgE2bNuWYmZlJpk6d6lYrGxPy6aefNqiq3rlzx/iLL77oclMiVQxdopsaOCjbpm40zdBl1y7gn/8E8vIAOzt1R/McMWYM8OgR8PAhU5DrArrF0OX11x1x5Ig1amv//g+spyfBq68W4bvvmPUqo92o23r1jz/+MNi2bZvduXPnmjXXPwsdNXQxIoQ0iAIQQvoDMOqMwHoz6emAvj5g2y0zExkAmCRsbyEsLBf6+o37HvX1JQgLY9arjGdC3darhYWFOl988UVO2yU7jioD5dYBiCSEPAJAADgDWN6lUfUC5NPZWAtwN8IkYXsHpqYSfPllFt54wwXV1RwYGEiwc2cmTE01Wit2zZo1TwAoHUHOUC9OTk7i0NBQobquP2PGDKVdG11Bm28ulNKrANwBrIVU9c2DUnqtqwPr6bDpbN0Mk4TVdCQSiUT1V9yFC5+Cy62G1Hu1CgsWMOtVBgOA7O+oxRfcNpM6IcQQwNsAVlNK7wPoRwiZ2nkh9k5YUu9mmCSsphNfVFRkpnJilw+aMzau7+zBcQxGT0UikZCioiIzAPEtlVGl+f0ggBgAAbL1HACnAFzscIS9lNJSQChkSb3bKCkB9u9nkrAajFgsXpqfn/99fn6+N1QVveJwgMjIbAAmuH+fNb8wGNIaerxYLG6xj1GVpO5KKQ0hhMwDAEppFWmqsMBoBJvO1s3s2SNV+2GSsBrL0KFDCyF1YmQwGF2IKm/MdYQQAwAUAAghrgA6TfyhN9KRpM4MnNoJk4RlMBiMBlRJ6h8Du
ArAiRByDMAvAN7pyqB6Os+a1AUCwMoKSE7u/Jh6JfX1TBKWwWAwFGiz+Z1S+l9CSAyA4ZBOaVtLKVVNROI5JT0dMDUF+vRR/RhKgSVLgLIy6efvv7PpcK0iEADDh0vfgpgkLIPBYABQbfT7LwCGUUovUUovUkqLCSH7uiG2Hot85Ht7kvLp08C9e1Lp8rt3gTNnui6+Ho/iG9DDh9JaOnsDYjAYDJWa3/sDeJcQsklhm0ZJxGoa7Z3OduYMMHcuUCNTMK6pAUJCgLNnuya+Hs+ZM0BcnDS5EwJoM7NBBoPBAFRL6qUAxgOwlTm3NXOsYfwNpUBGhupJ/fJlaQJv6jYpkQBz5kj3MxSoqABWrJCOdgekD/yNN/5eZzAYjOcYVZI6oZSKKaWrAJwB8DsAG1VOTggJJIQkE0LSCCHvKdm/iBBSRAiJlS1LFfYtJISkypaFqt6QuiksBKqrVUvqlAKLF7c84r2+XtrK3IbnzvPFpk1AVVXjbVVVwMaN6omHwWAwNAhVkvoe+Q+U0kMAFgH4b1sHEUK0AHwDYAoAPoB5hBC+kqInKKWDZMv3smMtAGwCMAyAP4BNhBBzFWJVO+0Z+f7nn0BxG0MOi4qA27c7HlevQCAAvv1W+takSHW1dDubNsBgMJ5zWuyMJISYUkrLAJySJVk56QBUmT/kDyCNUvpIdr7jAKYBSFTh2MkAfqaUlsiO/RlAIID/a+mAJ0+efLdabAAAGX9JREFU4NChQ422eXl54YUXXoBIJMKxY8eaHTNo0CAMGjQIVVVVOHnyZLP9fn5+8Pb2hlAoxI8//thsf0BAADw8PFBcXIyLF6UCe4WFwKJFQGYm8OjRSxgwYADy8/Nx9erVZscLheMhkTjByekxxo//pdn+q1cDkZ9vh9u3H0EguNls/9SpU2FlZYXk5GT873//a7Z/xowZMDMzQ3x8PJTZ0s6ZMweGhoaIjY1FbGxss/2hoaHQ0dHBnTt3kJCQ0Gz/okWLAAC3bt1CSkpja2kdHR2EhoYCAKKiopCe3thx0NDQEHPmzAEAXL9+HdnZjR01TU1NMXPmTNlzuIr8/HzpSMKQEACA5ZMneOXCBQDAhVdewRNLS2D3bmDwYACAnZ0dAgMDAQBnz55FWVljPwVHR0dMmDABAHDy5ElUNan99+/fH6NHjwYAHDt2DCKRqNF+LpeLF198EQCafe8A9Xz3FHnppda/e+PHj4eTkxMeP36MX35p/t0LDAyEnZ0dHj16hJs3m3/3GAyGZtJaTf0H2WcMgGjZZ4zCels4AHissJ4t29aUYEJIHCHkNCHEqT3HEkKWEUKiCSHRTf/pqgv5YDd9/bbLcrloU9KawwHc3DoeV4+nuhoob8MKuby8eS2ewWAwniMI7aIOW0LILACBlNKlsvVXIZ0at1qhjCWACkppLSFkOYAQSuk4QsgGAPqU0i2ych8BqKaUbm/pen5+flRZbbS7ef114KefpDX2tqAUsLNrvaytLZCXx2ZsgVJgxAhpn4Wy7yyHI523zib4dymEkBhKKZv9wmBoKC3WEwkhQ1pbVDh3DgAnhXVH2bYGKKVPKKVyydnvAQxV9VhNpT3T2QgBDh6UmospQ0sLOHCA5SgA0oewZ0/Lowb19NjDYjAYzz2tTfANa2UfBdCWhNcdAO6EkP6QJuS5AOYrFiCE9KWU5slWgwAkyX6+BmCrwuC4SQD+1cb1NIKMDMCvHfWYl18GTp4EZs9uPK2Nw5Fuf/nlTg+x53LnjvRTVxeoq/t7u4EBsHIl4OGhnrgYDAZDQ2gxqVNKx3bkxJRSMSFkNaQJWgvAAUppAiFkM4BoSul5AGsIIUEAxABKIB1ZD0ppCSHkU0hfDABgs3zQnCZTXw9kZUkTdHuYORM4fhxYuFDaJayvDxw5It3OkCGRAGFhgK8vkJ0ttVuVY2gIbN6svtgYDAZDQ1CpT50Q4g3ptLSG4V+U0ogujKvdaEKfelYW4OwM7N0LLFvWvmPlXca3b7OuYaVcugRMnQocOyatqS9aJBWcMTICDh8GgoPVHeFzAetTZzA0G1W03zcB+Fq2jAXwHzBfZKV0xHKVEGmXsKkp6xpWyrZtgJOTtBkkOFhaY+dwgIEDWZMGg8FgyFBFfGYWpDKx+ZTSxQAGAmBSsUroSFIHAB5PKkbDuoabcOcOEBUFvPkmoKPD3oAYDAajBVRxwqimlEoIIWJCiCmAQjQemc6QkZ4uzS9OHXg6LY2Ef64JCwPMzKTzBeXI34DYA2MwGIwGVEnq0YSQPgC+g1R4pgJAc/kyBtLTAQcH6ewqRieRng6cOiW1VzUxabyPJXQGg8FoRKtJnRBCAPybUloKYA8h5CoAU0ppXLdE18NojzsbQ0V27pQm7zVr1B0Jg8FgaDyt9qlT6dD4ywrrGSyht0x7fdQZbVBSAuzfD8yfL20CYTAYDEarqDJQ7i4h5IUuj6SHU1sL5OSwpN6p7Nkjnba2fr26I2EwGIwegSp96sMAhBJCMgFUAiCQVuJ9uzSyHkZWlnSuOUvqnURtLRAeDgQGAj4+6o6GwWAwegSqJPXJXR5FL6Cj09kYTTh6FCgokA6QYzAYDIZKtNn8TinNhHQK2zjZz1WqHPe8IU/qLi5qDaN3IJeEHTQIGNeWxQCDwWAw5LRZU5cpyvkB8ABwEIAOgKMARnRtaD2L9HSpLgobz9UJXLkCJCVJJWGZsAyDwWCojCo17hmQysJWAgClNBeASatHPIdkZAD9+rGp052CoiQsg8FgMFRGlaReJ5vaRgGAEGLUtSH1TNh0tk6iqSQsg8FgMFRGlaR+khCyF0AfQsjrAK5Dqi7HUIAl9U5CmSQsg8FgMFSizT51Sul2QshEAGWQ9qtvpJT+3OWR9SAqKoCiIpbUO0xrkrAMBoPBaBNVprRBlsRZIm+BjAzpJ0vqHYRJwjIYDEaHUMVPfSYhJJUQIiSElBFCygkhZd0RXE+BTWfrBJgkLIPBYHQYVWrq/wHwCqU0qauD6akw4ZlOgEnCMhgMRodRZaBcwbMmdEJIICEkmRCSRgh5r5VywYQQSgjxk627EEKqCSGxsmXPs1y/u8jIAAwNARsbdUfSQ2GSsAwGg9EpqOqnfgLAOQC18o2U0rOtHUQI0QLwDYCJALIB3CGEnKeUJjYpZwJgLYDbTU7xkFI6SIX41E56urTpnemkPCPHjjFJWAaDwegEVEnqppBKw05S2EYBtJrUAfgDSKOUPgIAQshxANMAJDYp9ymALwC8rUrAmgibztYBJBJg+3YmCctgMBidgCpT2hY/47kdADxWWM+G1PGtAULIEABOlNJLhJCmSb0/IeQepFPpPqSU/tb0AoSQZQCWAUC/fv2eMcyOQak0qY8apZbL93yYJCyDwWB0GqqMfucSQn4hhMTL1n0JIR929MKEEA6AHQCUjYzKA9CPUjoYwFsA/n979x9kVXnfcfz9QQWVH2UMqAgIaLAWf4Toipma2IxVQ4xipmKLxpk4SYNMoTUqNjqxmUb/sFHGTpzBKI5OYqpSgzN1UyGmP8SoFWEVgoKxAhcBoxYVQRNA
gW//OOea68rde+7u3l/nfl4zO7vn3Oec+31WnO8+5z7P93lA0rDujSJiQUR0RETHyJEj+xpSr2zbBjt2eKTeay4Ja2bWb7JMlLsbuB74ECAiVgMzMlz3GsnubkVj0nNFQ4ETgaWSNgKfAzoldUTE7oh4O32/54D1wHEZ3rPuPPO9D1wS1sysX2VJ6odGxPJu5/ZkuG4FMFHSBEkDSf4Q6Cy+GBHbI2JERIyPiPHAMmBaRHRJGplOtEPSMcBEYEOG96w7r1HvA5eENTPrV1mS+luSjuUPG7pMJ3k83qOI2APMAR4DXgIeiog1km6UNK3C5WcCqyWtAhYBsyLinQyx1p2ryfVSsSTsFVe4JKyZWT/JMvt9NrAAOF7Sa0ABuCzLzSNiMbC427nvlWn7xZKfHwYezvIejVYowPDhyZdVwSVhzcz6XZbZ7xuAs9MtVwdExHu1D6t1eDlbLxRLwp5zDixfDqNGwemne/a7mVkfVUzqkq7udgywHXguIlbVKK6WUSjApEmNjqLFXHVVUhL28cfhqaeSterDh8Ndd8F55zU6OjOzlpXlM/UOYBbJuvPRwBXAVOBuSX9fw9iaXkTymbpH6lV45BG4777k5507k/WA778PW7bA9OmweHHP15uZWVlZkvoY4JSIuCYirgFOBQ4nmcx2eQ1ja3pvvAG7djmpZxYBl19e/vWdO5OJcxF1C8nMLE+yJPXDKan5TrJe/YiI2NntfNvxcrYqPfMMbN/ec5t3300+Zzczs6plmf1+P/CspEfS4wtIKrwN5pN13NuKl7NV6dFHK4/CBwyA3/62PvGYmeVMltnvN0laApyRnpoVEV3pz1+rWWQtwCP1Ki1Zksxw7ymx79sHRx1Vv5jMzHIky0idNIl3VWzYZgoFOOKIZC91q2DFCli5EoYNSybHlTN8OEyZUr+4zMxyJMtn6laG16hXoVgS9p574JBD9t/mkEOSZW1er25m1itO6n3gpJ5RaUnY6dNh0SIYMwaGDElG7kOGJMeLFnmduplZH2R6/G6ftGcPbNoEl1zS6EhaQPeSsOedl/zyli9PJsUddVTyyN0jdDOzPnFS76UtW2DvXk+Sq6hYEvbSS2H06D+cl5LSsGZm1m/8+L2XvJwtozvvTErCXnNNoyMxM8s9J/VeKi5nc1Lvwe7dcPvtMHUqnHRSo6MxM8s9J/VeKhSSOilHH93oSJrY/ffDm2/C3LmNjsTMrC04qfdSoZBM2D7ooEZH0qT27YN582DyZDjrrEZHY2bWFjxRrpe8nK2CJUvgpZeS0bpntZuZ1YVH6r3kpF7BrbfC2LFw8cWNjsTMrG3UNKlLmirpZUnrJF3XQ7uLJIWkjpJz16fXvSzpS7WMs1q7diXLq72crYwVK+CJJ+Db3/bnE2ZmdVSzx++SDgDmA+cAW4AVkjojYm23dkOBK4FnS85NAmYAJwBHAf8p6biI2FureKuxaVPy3SP1MoolYb/1rUZHYmbWVmo5Up8CrIuIDRHxAbAQuHA/7W4CfgDsKjl3IbAwInZHRAFYl96vKXg5Ww9KS8IOHdroaMzM2kotk/poYHPJ8Zb03EcknQKMjYhHq702vX6mpC5JXVu3bu2fqDNwUu9B95KwZmZWNw2bKCdpAHAb0OtSYxGxICI6IqJj5MiR/RdcBYUCDBzobb8/oVxJWDMzq4taLml7DRhbcjwmPVc0FDgRWKpkydORQKekaRmubahCAcaNS4rPWAmXhDUza6hapqUVwERJEyQNJJn41ll8MSK2R8SIiBgfEeOBZcC0iOhK282QNEjSBGAisLyGsVbFy9n2wyVhzcwarmZJPSL2AHOAx4CXgIciYo2kG9PReE/XrgEeAtYCvwBmN8vMd3BS3y+XhDUzazhFRKNj6BcdHR3R1dVV8/d57z0YNgxuvhmuK7vyvs3s2wcnngiDBsHzz7uCXI5Jei4iOiq3NLNGcJnYKnnL1f1wSVgzs6bgqV5V8nK2/XBJWDOzpuCkXiUn9W5cEtbMrGk4qVepUIDBg2HEiEZH0iRcEtbMrGk4qVepOPPdHx3jkrBmZk3GSb1KXs5WwiVhzcyaipN6FSKS2e/echWXhDUza0JO6lV4551knbpH6rgkrJlZE3JSr4JnvqdcEtbMrCk5qVfBST3lkrBmZk3JSb0KTuokJWHnzYPJk+GssxodjZmZlXCZ2CoUCnDYYUnt97blkrBmZk3LI/UqeDkbySjdJWHNzJqSk3oV2n45W1cXLF3qkrBmZk3KST2jffuSpN7WI/V581wS1sysiTmpZ/TGG8lKrrZN6i4Ja2bW9JzUM2r7me8uCWtm1vSc1DNq66TukrBmZi2hpkld0lRJL0taJ+m6/bw+S9ILklZJekrSpPT8eEk70/OrJN1ZyzizKCb1tpwo55KwZmYtoWbr1CUdAMwHzgG2ACskdUbE2pJmD0TEnWn7acBtwNT0tfURMblW8VWrUIBRo+DggxsdSZ25JKyZWcuo5Uh9CrAuIjZExAfAQuDC0gYRsaPkcDAQNYynT9p2OZtLwpqZtYxaJvXRwOaS4y3puY+RNFvSeuAWoHQW1gRJKyU9IekLNYwzk7YsPOOSsGZmLaXhE+UiYn5EHAt8B7ghPf06cHREfBa4GnhA0ieKs0qaKalLUtfWrVtrFuOePbB5cxsm9WJJ2GuvdUlYM7MWUMuk/howtuR4THqunIXAVwEiYndEvJ3+/BywHjiu+wURsSAiOiKiY+TIkf0WeHebN8PevW2Y1F0S1syspdQyqa8AJkqaIGkgMAPoLG0gaWLJ4VeAV9LzI9OJdkg6BpgIbKhhrD1qy+VsLglrZtZyajb7PSL2SJoDPAYcANwbEWsk3Qh0RUQnMEfS2cCHwDbg6+nlZwI3SvoQ2AfMioh3ahVrJW2Z1F0S1sys5dR069WIWAws7nbueyU/X1nmuoeBh2sZWzUKhaSY2tixldvmQrEk7Ny5LglrZtZCGj5RrhVs3Jgk9APbZfd5l4Q1M2tJTuoZFApttEbdJWHNzFqWk3oGbbVG3SVhzcxalpN6BTt3wuuvt0lSd0lYM7OW5qRewauvJt/bIqm7JKyZWUtzUq+gbZazuSSsmVnLa5f53L3WNkm9WBL2/vtdEtbMrEV5pF7Bxo0waBAceWSjI6kxl4Q1M2t5TuoVFAowbhwMyPNvyiVhzcxyIc+pql+0xXI2l4Q1M8sFJ/UKcp/UiyVhr7jCJWHNzFqck3oPduxICqzlOqm7JKyZWW44qfcg9zPfXRLWzCxXnNR7kPuk7pKwZma54qTeg40bk++5TOouCWtmljtO6j0oFGDIEDjssEZHUgMuCWtmljtO6j0oznzPXYE1l4Q1M8sll4ntQaEAxxzT6ChqwCVhzcxyySP1MiJyvEbdJWHNzHKppkld0lRJL0taJ+m6/bw+S9ILklZJekrSpJLXrk+ve1nSl2oZ5/689VYyMTx3Sd0lYc3McqtmSV3SAcB84MvAJOCS0qSdeiAiToqIycAtwG3ptZOAGcAJwFTgjvR+dZPb5WwuCWtmllu1HKlPAdZFxIaI+ABYCFx
Y2iAidpQcDgYi/flCYGFE7I6IArAuvV/d5HI528aNLglrZpZjtZwoNxrYXHK8BTi9eyNJs4GrgYFAcSr2aGBZt2s/UfJM0kxgZnq4W9KLfQ/7404+ub/v2CsjgLf67W633JJ8NV7/9qu55LVvf9zoAMysvIbPfo+I+cB8SZcCNwBfr+LaBcACAEldEdFRmygbK699y2u/IL99k9TV6BjMrLxaPn5/DRhbcjwmPVfOQuCrvbzWzMys7dUyqa8AJkqaIGkgycS3ztIGkiaWHH4FeCX9uROYIWmQpAnARGB5DWM1MzNreTV7/B4ReyTNAR4DDgDujYg1km4EuiKiE5gj6WzgQ2Ab6aP3tN1DwFpgDzA7IvZWeMsFtepLE8hr3/LaL8hv3/LaL7NcUERUbmVmZmZNzxXlzMzMcsJJ3czMLCdaLqlnKD07SNK/pq8/K2l8/aOsXoZ+nSnpeUl7JE1vRIy9laFvV0taK2m1pP+SNK4RcVarL2WQm12lvpW0u0hSSMrd8j2zVtRSST1j6dlvAtsi4tPAPwM/qG+U1cvYr03A5cAD9Y2ubzL2bSXQEREnA4tISgY3tb6UQW52GfuGpKHAlcCz9Y3QzMppqaROhtKz6fFP0p8XAX8uNf3+ollK6m6MiNXAvkYE2AdZ+vZ4RPw+PVxGUpeg2fWlDHKzy/L/GcBNJH8076pncGZWXqsl9f2Vnu1ePvajNhGxB9gOfKou0fVeln61qmr79k1gSU0j6h+Z+iVptqT1JCP1v6tTbH1VsW+STgHGRsSj9QzMzHrWakndckzSZUAHcGujY+kvETE/Io4FvkNSBrnlSRpA8lHCNY2Oxcw+rtWSepbysR+1kXQg8EfA23WJrvfyXBY3U9/SIkTfBaZFxO46xdYXfSmD3Owq9W0ocCKwVNJG4HNApyfLmTVeqyX1iqVn0+PipjDTgf+O5q+wk6VfrSpLueDPAneRJPT/a0CMvdGXMsjNrse+RcT2iBgREeMjYjzJPIhpEeHNXswarKWSevoZebH07EvAQ8XSs5Kmpc3uAT4laR3Jlq5ll+M0iyz9knSapC3AxcBdktY0LuLsMv43uxUYAvwsXf7V9H/QZOzXHElrJK0i+beYeQfCRsrYNzNrQi4Ta2ZmlhMtNVI3MzOz8pzUzczMcsJJ3czMLCec1M3MzHLCSd3MzCwnnNSt7iQtLRYqkbRY0vA+3u+Lkv69zGsPpru/XdWX9zAzawUHNjoAy590Ax1FRMXNZyLivBrGcSRwWrpjX9ZrDkzXaZuZtRyP1NuEpH9I98d+Kh29zk3PHyvpF5Kek/SkpOPT8z+WdLuk/5G0oXQPd0nXSlqRjoC/n54bn97/PuBFYKykH0nqSguwfL9MXBsljUj3Hl+VfhUkPZ6+fq6kZ5TsJf8zSUPS81Ml/UbS88BflOn2L4HR6T2/kD4h+GF6/KKkKem9/lHSTyU9Dfy0P37fZmaN4KTeBiSdBlwEfIZkj+zSGt0LgL+NiFOBucAdJa+NAj4PnA/8U3qvc4GJJNtzTgZOlXRm2n4icEdEnBARrwLfjYgO4GTgzySdXC7GiLgz3Xf8NJJdwW6TNIJkE5SzI+IUoAu4WtLBwN3ABcCpwJFlbjsNWB8RkyPiyfTcoen7/A1wb0nbSen7XFIuRjOzZufH7+3hDOCRiNgF7JL0c4B01PunJOVZi20HlVz3b+kj9LWSjkjPnZt+rUyPh5Ak803AqxGxrOT6v5Q0k+Tf2SiSxLm6Qqw/JKnX/3NJ56fXPJ3GNxB4BjgeKETEK2k//gWYmfF38SBARPxK0rCSz/M7I2JnxnuYmTUlJ/X2NgB4Nx257k/pbmkq+X5zRNxV2lDSeOB3JccTSEb+p0XENkk/Bg7uKRhJlwPjSOqOF9/rP7qPniWVizeL7nWRi8e/697QzKzV+PF7e3gauEDSweno/HyAiNgBFCRdDMkEN0mfqXCvx4BvlHy2PVrS4ftpN4wkUW5PR/lf7ummkoqP/y8rmWC3DDhD0qfTNoMlHQf8Bhgv6di0XTWPzP8qvdfnge0Rsb2Ka83MmppH6m0gIlakO5+tBt4EXgCKyexrwI8k3QAcRLLv9697uNcvJf0J8Ez6SPx94DJgb7d2v5a0kiQBbyb5w6Inc4DDgMfT+3ZFxF+no/cHJRU/FrghIv43faz/qKTfA0+S7PGdxa40roOAb2S8xsysJXiXtjYhaUhEvC/pUOBXwMyIeL7RcdWTpKXAXO/7bWZ55ZF6+1ggaRLJ59o/abeEbmbWDjxSNzMzywlPlDMzM8sJJ3UzM7OccFI3MzPLCSd1MzOznHBSNzMzy4n/ByovRNlIJOlHAAAAAElFTkSuQmCC\n", - "image/svg+xml": "\n\n\n\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n 
\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n\n", - "text/plain": "
" + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAfMAAAEKCAYAAAAGkryaAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjQuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/MnkTPAAAACXBIWXMAAAsTAAALEwEAmpwYAABaH0lEQVR4nO3dd1hUZ/YH8O8ZOoIIig0QlI5gJahRY4sJ7s8YFY1tExPjqklcjUaT7Ka4MWuams2apqYZExN7EkvUxF1LqhE7HXRQVFAUpUgd5vz+uDPuSB3KMDNwPs/DA3Pn3jvnjsiZe+/7nkPMDCGEEEJYL5W5AxBCCCFEw0gyF0IIIaycJHMhhBDCykkyF0IIIaycJHMhhBDCykkyF0IIIaycSZM5EUUTUTIRpRHR81U8/y8iOqn7SiGimwbPTSeiVN3XdFPGKYQQQlgzMtU8cyKyAZACYCSAiwCOApjCzAnVrP9XAL2ZeQYReQCIBRAJgAEcA9CXmW+YJFghhBDCipnyzDwKQBozn2PmUgAbATxYw/pTAHyt+/l+AD8yc44ugf8IINqEsQohhBBWy9aE+/YCkGHw+CKAflWtSES+ALoC+G8N23pVsd0sALMAoFWrVn1DQkIaHrWZZGYCly8DvXsDKhnJICzUsWPHrjGzp7njEELcyZTJvC4mA9jKzOV12YiZ1wJYCwCRkZEcGxtritiaxOOPA7t3A8ePmzsSIapHROfNHYMQojJTngNeAuBj8Nhbt6wqk/G/S+x13bZZUKuBrl3NHYUQQghrZMpkfhRAIBF1JSJ7KAl7R8WViCgEgDuA3wwW7wNwHxG5E5E7gPt0y5otSeZCCCHqy2TJnJk1AOZCScKJADYzczwRLSWiMQarTgawkQ2G1TNzDoBXoXwgOApgqW5Zs6TRABkZksyFEELUj0nvmTPz9wC+r7Ds5QqP/1HNtp8C+NRkwVmQjAygvFySuWi+jh071t7W1vZjAOGQYlVC1IcWQJxGo5nZt2/fqxWftJQBcC2aWq18l2TezMXGAh9/DLz6KuDZsgaE29raftyxY8dQT0/PGyqVyjTFLYRoxrRaLWVnZ4dlZWV9DGBMxeflE7IFSE9Xvksyb+aWLwc2bgQcHc0diTmEe3p65kkiF6J+VCoVe3p65kK5ulX5+SaOR1RBrVbmlvv41L6usFJqNbB1KzB7NuDqau5ozEEliVyIhtH9H6oyb0sytwBqNeDtDdjZmTsSYTLvvAPY2ADz5pk7EiFEMyTJ3ALItLRmLidHuVc+dSrgVamQoRBCNJgkcwsgybyZW70aKCwEnnnG3JG0eF988UUbIup74sSJ2wMXkpOT7QMDA7sDwK5du1yHDRsW0NDXiYmJ8fvss8/cAWDSpEm+x44dcwQAZ2fn3g3Z765du1x//PHHVnXdzsvLKyIzM9OoAc+rVq1q+8gjj3Spe3RVGzJkSMC1a9dsAOCf//xn+27dunUfM2ZM1w0bNrj9/e9/79hYr6On1WrRv3//oJycHBUA2NjY9A0JCQnTfyUnJ9s39mvq1eV9NjRr1izvHTt2NOj+m4xmN7OiIqUuuyTzZqqkBFi1CoiOBiIizB1Ni7dx40aPPn36FKxfv96jd+/el5viNTdt2lSnErhlZWWwq+ae23//+19XFxeX8pEjR95qlOCawKFDh9L0P3/yySee+/fvT/H39y/TLco1dj81vS+GNm/e7Na9e/ciDw8PLQA4ODhok5KSquzWaSkWLVp09bHHHvMdM2ZMfn33IcnczM7r/ptLMm+mvvwSuHIFWLTI3JFYjBkz4BMXB+fG3Gd4OAo//fSO5kyV5Obmqo4ePeqyf//+5DFjxgT+61//MjqZazQaPPnkk94HDhxwIyKePn36tRdeeOHqokWLOu3du7dNSUmJKjIysmDDhg3nVRU6JUVFRQWvWLEi45577ikEgMcff9zn0KFDrT09Pcu2bdt2rnPnzpqoqKjg8PDwwj/++MMlJiYmJzg4uPiNN97oVFZWpnJ3d9ds2rTpXGFhoWr9+vWeKpWKN2/e3Padd9650KNHj+LHHnvM99KlS/YA8Pbbb1+47777bmVlZdnExMR0u3Llin3fvn0Lqmt1vXXr1tYvv/yyV3l5OXl4eGh+++23FMPnv/rqK7eKcfj4+Gh2797t8swzz3QBACLCr7/+mpSXl2cTExPTraCgwKa8vJzefffd89HR0QVeXl4RsbGxic8880znixcvOowaNSpw2rRp19zd3ctjY2NbrV+//sLly5dtqzqOhQsXdj537pzDhQsXHLy8vEqWLFmS+dhjj3UtKysjrVaLbdu2nY2IiCgxjHnDhg0es2fPvlbTv+dPP/3kvHDhQp/CwkKVu7u7ZsOGDem+vr5lUVFRwREREYVHjhxxKSwsVH322WfqZcuWdUpOTnZ68MEHc1atWnUZAO69917/zMxM+5KSEtWcOXOuLFq0qNLrffDBBx4ffvhhh7KyMurTp8+t9evXnweASZMm+Z0+fboVEfG0adOuLVmy5GpQUFDpzZs3bS9cuGDbpUsXTU2xV0cus5uZTEtrxrRaYOVKoFcvYPhwc0fT4n311Vdthg4dmtujR48Sd3d3zU8//WT0B4qVK1d6XrhwwT4hISE+JSUlYebMmdcBYPHixVfj4uISU1NT44uKilQbN250q2k/RUVFqsjIyFtpaWnxAwcOzH/++ec7658rLS2luLi4xFdeeeXKyJEjC06ePJmUmJiYMGHChJylS5d2DA4OLn3kkUey58yZcyUpKSkhOjq6YPbs2T4LFy68EhcXl/jNN9+cnTNnjh8APP/8850HDBhQkJaWFj9u3LibmZmZlS4tX7582Xbu3Ll+27dvP5ucnJzw7bffnq24TlVx6N6PjqtWrTqflJSU8Pvvvye5uLhoP/30U48RI0bkJiUlJSQmJsb369evsML7f6F9+/Zlhw4dSlmyZMkdRU+qOw4ASE1NdTx8+HDyzp071e+++67nk08+eSUpKSnh9OnTiV27di2tGPOxY8dcBg4cePvKRUlJiUp/iX3kyJH+JSUlNG/evC7ffffd2fj4+MTp06dfW7Ro0e3BLPb29tq4uLjExx57LHvixIkBH3300YWkpKT4TZs2tcvKyrIBgA0bNqTHx8cnnjx5MmHNmjUd9Mv1jh8/7rh161aP2NjYpKSkpASVSsWrV69u+9tvvzlnZmbapaamxqekpCQ89dRT1/XbREREFP73v/91qer3xhhyZm5m+oIxfn5mDUOYwp49QGIisGEDQGTuaCxGbWfQprJ582aPefPmXQWAmJiYnC+++MJj8ODBhbVtBwD//e9/W8+ZMydbf5m3Q4cO5QCwZ88e17fffrtjcXGx6ubNm7ZhYWFFqOHSsUqlwsyZM3MAYMaMGdfHjx9/+/78lClTbpesVqvV9mPHjvXOzs62Ky0tVfn4+JRUtb9ffvmldWpqqpP+cUFBgU1ubq7q999/d92+fXsaAEyePDl39uzZlTpSHjx4sFVUVFR+SEhIqeExGaoujv79+xcsWrTI5
6GHHsqZMmXKDX9/f23//v1vzZ4926+srEw1YcKEG3fffXdRze9q7ccBANHR0TddXFwYAAYMGHBrxYoVnS5evGg/efLkGxXPygEgNzfX1t3dXat/XPEy+9GjRx1TU1Odhg8fHgQo99g9PT31l/0xbty4mwDQs2fPooCAgCJfX98yAPDx8Sk5d+6cfceOHYvefPPNDrt3724DAFlZWXbx8fGOHTt2vP0BYu/eva5xcXHOPXv2DAWA4uJiVfv27TWTJk26mZGR4TB9+nSfBx54IHfcuHF5+m08PT01+isT9SHJ3MzUasDeHujcufZ1hZVZvlwpHjBxorkjafGuXLli8/vvv7smJyc7zZ07F+Xl5URErNVqL9Z3n4WFhfTMM8/4HjlyJCEgIKBs4cKFnYuLi+t0tZMMPuS5urreTkBz587tMn/+/Kxp06bl7tq1y3Xp0qVV/oVgZhw/fjzR2dnZJHP4q4vjtddeyxo7dmzud9995zZ48OCQ3bt3p44aNarg8OHDydu2bXObMWNG17lz516ZO3fu9dpeo7bjaNWq1e33Zc6cOTmDBw++9c0337iNHj068N133z1f8T6zjY0Nl5eXw8bGpuKu9K9FAQEBRSdPnkyq6nlHR0cGlA9eDg4Ot+NRqVTQaDS0a9cu10OHDrnGxsYmubq6aqOiooKLioru+HdnZpo4ceL1999/v1K3z7i4uIRvvvmm9erVqz03bdrksWXLlnQAKC4uJicnJ23F9Y0ll9nNTK0GfH2VojGiGTl6FDh0CFiwQAoIWIAvvvjCfdy4cTmXL18+c+nSpTNZWVmnvb29S/ft22fUZc0RI0bkrVmzpl1ZmXICd+XKFZvCwkIVAHTs2FGTm5ur2rlzp3tt+9FqtdCPcl+3bl3bqKioKgc85efn23Tp0qVMv55+uaura3l+fv7tLDVo0KC8119/vb3+8a+//uoEAP3798/Xb7d58+bWeXl5lTLb0KFDb/3xxx+uSUlJ9vpjMjaO+Ph4h6ioqKJly5Zl9ejR41ZcXJxjSkqKvbe3d9kzzzxz7ZFHHsk+fvy40bcxqjuOihISEuxDQ0NLXnzxxav333//zZMnT1Zar2vXrsWJiYkO1b1Wjx49inNycmz379/fCgBKSkooNjbW6LKMN2/etHFzcyt3dXXVnjhxwvHUqVOVZhdER0fn7dq1y/3SpUu2gPLepqSk2GdmZtqWl5fj0Ucfvfn6669fOnPmzO336OzZs449e/Y0+mpGRZJCzEympTVTK1YAbm7AzJnmjkQA2LJli8f48eNvGC578MEHb3z55Zcexmy/YMGCbG9v79KQkJDuwcHBYZ988olHu3btyqdNm5YdGhrafdiwYUE9e/asdYS5k5OT9o8//mgVGBjY/fDhw66vv/56ZlXrvfDCC5enTJni371799C2bdveHhAVExNzc/fu3W1CQkLC9u7d67J27dqM48ePtwoKCgrz9/fv/t5773kCwBtvvHH5l19+cQkICOi+fft2906dOlW6t9y5c2fNqlWr0seNGxcQHBwcNm7cuG7GxvHWW2+1DwwM7B4UFBRmZ2fHEyZMyN23b59raGho99DQ0LBt27Z5PPvss1eMeW8BoLrjqOjLL7/0CAoK6h4SEhKWmJjoNHv27Epn/vfdd1/uDz/8UO00L0dHR964cePZ559/3js4ODise/fuYYcOHTL6XnVMTEyuRqOhbt26dV+8eLFXVf/uffv2LX7xxRcvjRgxIigoKChs+PDhQRkZGXbp6el2gwYNCg4JCQl7+OGHuy1duvQioHygSE9Pd7jnnnvqPUuBqhvlaG0iIyM5NjbW3GHUWdu2ylXY1avNHYloNGo1EBCgjGB/801zR9OoiOgYM0fWdbtTp06l9+zZs8YRxkI0hvPnz9tNmTLF79dff001dyzGWr9+fZtjx445//vf/651hsWpU6fa9ezZ06/icjkzN6O8PKU4mJyZNzNSulUIs/H19S2bMWPGNX3RGGug0WjopZdeMvpKRlVkAJwZybS0ZkhKtwphdjNnzrxR+1qWY8aMGQ2O12o+uTRHMi2tGZLSrUIIM5Bkbkb6ZC5n5s2ElG4VQpiJSZM5EUUTUTIRpRHR89Ws8xARJRBRPBF9ZbC8nIhO6r52mDJOc1GrgVatgHbtzB2JaBRSulUIYSYmu2dORDYA3gcwEsBFAEeJaAczJxisEwjgbwAGMvMNImpvsIsiZu5lqvgsgX5amhQHawakdKsQwoxMeWYeBSCNmc8xcymAjQAerLDOXwC8z8w3AICZr6IFkTnmzYi+dOvixfLpzIJJC9TaNbcWqETU98EHH7z9l7asrAzu7u49a/t3ru/vQnFxMUVGRgbrCww1FVOOZvcC7qjBfBFAvwrrBAEAEf0CwAbAP5h5r+45RyKKBaAB8AYzf2vCWJscs5LM5SSumZDSrVZBWqA2PXO3QHVyctImJyc7FRQUkIuLC3/zzTetO3ToYLJM6+joyEOGDMn7+OOPPZ544omc2rdoHOYeAGcLIBDAUABTAHxERG10z/nqilNMBfAOEflX3JiIZhFRLBHFZmdnN1HIjeP6deDWLTkzbxakdGvdzJjhg6io4Eb9mjHDp7aX1bdA/eyzz9K/+eYboyq/6Wk0GsyaNctbX/Vs2bJl7QFg0aJFncLDw0MDAwO7T5kyxVerrVxaOyoqKvjw4cO3y3Y+/vjjPgEBAd0HDBgQdPnyZVv9OjNmzPAJDw8P/ec//9nhq6++cuvRo0dIaGho2N133x2UkZFhm5ycbL9+/XrP1atXd9BXgLt8+bLt/fff7x8eHh4aHh4e+sMPP7QCgKysLJuBAwcGBgQEdJ80aZJvTS1Qw8LCQoODg8MGDBgQVPH5quIAgN27d7voO5GFhoaG3bhxQ3X+/Hm7yMjI4JCQkLDAwMDue/fudQH+d1Vg6tSpXfQtUF955ZX2hlcAqjuOhQsXdh47dmzXPn36hIwfP75rbGysY0RERGhISEhYUFBQ2JkzZyqVbd2wYYOHvlmK3r333pu7ZcuWNgDw9ddfe8TExNxOsgcOHHDu1atXSGhoaFjv3r1DTp06VWmfeXl5qokTJ/pFRESEhoaGhn355ZdtAKC6eCZMmHBz48aNdfodayhTJvNLAAz/g3nrlhm6CGAHM5cxsxpACpTkDma+pPt+DsBBAJUuTzHzWmaOZOZIT88qq/9ZLBnJ3oxI6VarIC1Q79RSWqACwMMPP5yzadMm98LCQkpMTHQeMGDA7ed79uxZfPTo0aTExMSEJUuWXHr22We9K+7z73//e6dhw4blnTlzJvGnn35KfvHFF73z8vJU1cVz1113FZ0+fbrOt0MawpSX2Y8CCCSirlCS+GQoZ9mGvoVyRv4ZEbWDctn9HBG5Ayhk5hLd8oEA3jJhrE1O5pg3E2o1sHWrMoLdtdpy0MLQp59KC1RIC1RjjgNoeAtUAOjXr1/RxYsXHT766COPe++9945/n5ycHJtJkyZ1TU9PdyQiLisrqzTo5eDBg6337dvXZtWqVR0BpZZ6WlqafXXx2Nraws7Ojm/cuKGq
GIupmOzMnJk1AOYC2AcgEcBmZo4noqVENEa32j4A14koAcABAIuZ+TqAUACxRHRKt/wNw1HwzYGcmTcTUrrVKuhboD711FO+Xl5eEe+9917HnTt3uld1WdxY+hao27dvP5uSkpLw5z//+VpjtkB98sknr6akpCS8995750tKSqrcr751aFJSUkJSUlLC1atXT7u5uTVa8qgujtdeey3r448/Pl9UVKQaPHhwyIkTJxz1LVC9vLxKZ8yY0fW9995rW9v+jTmOii1Qv/vuuzQnJyft6NGjA3fs2FHpE7S+BWpF0dHRN5csWeLzyCOP3HEf+7nnnvMaMmRIfmpqavzOnTvTSktLK73XzIytW7em6ePLzMw806dPn+Ka4ikrKyNTtaatiknvmTPz98wcxMz+zLxMt+xlZt6h+5mZeSEzhzFzBDNv1C3/Vfe4p+77J6aM0xzUasDDA2jd2tyRiHqT0q1WQ1qgSgvUJ5544tqiRYsuR0VF3XHFIC8vz8bb27sUANasWVNl1Y9hw4blrVy5soP+w98vv/ziVFM8WVlZNm3atNEY9kM3NXMPgGuxZFpaMyClW62GtECVFqj+/v5lL774YqXpz88991zWP/7xD+/Q0NAwjUZT8WkAyvup0WgoJCQkLCAgoPuLL77oVVM8e/bsaV3xcr6pSQtUMwkKAnr2BLZsMXckol5KSgBfX6B3b2WOeQshLVCFpbOEFqj33Xef/4oVKy726NGjyrEODSEtUC2IVgucPy9n5lZNSrcKYZHM3QK1uLiYxowZc9MUibwm0gLVDDIzgdJSSeZWS0q3CmHRzNkC1dHRkefOnVvp8r+pSTI3A5mWZuX0pVs3bJDSrUIIiyCX2c1ApqVZOSndKoSwMJLMzUDOzK2YlG4VQlggSeZmoFYDnToBjo61ryssjJRubRJaLfCf/6DV+vVo85//oFUDarvcdvbsWbsRI0b4+/r6hvv4+IQ/9thjPsXFxVXeJ0lPT7eLjo6uNFWrIsOOYHW1cOHCzi+//HIHY9dvaMc1Q2+99ZanvqjLiRMnHPU11uPj4x169+4d0tD9R0dHd0tISLAHlNrsQUFBYfpa7vXp+masinXwjfXaa695vvPOO0YXubFEcs/cDGSOuZWS0q1NYtMmuM2fD9/8fNgQgbVaUOvWKP/3v3F+0iTju2wZ0mq1GDt2bMDMmTOvzp8//6xGo8HUqVN958+f77VmzZqLhuuWlZXBz8+vbO/evedq269hRzBr8uyzz97uTLVly5Y2Y8aMufHWW29lAsCJEyeSjN2PVqsFM8PG5n+fZ2JjYx3Ly8spLCzs9tz2Q4cOpXTq1KnqSdwW4K9//ev1qKiokKeffrrJB641FjkzN4P0dEnmVklKt5rcpk1wmz4d3a5cgV1hIVS3bsGmqAiqK1dgN306um3ahBobmVRn586drg4ODtr58+dfB5Ta2atXr87YtGlTu/z8fNWqVavaDh8+PKB///5Bd999d7Bhj/P8/HzVn/70p27+/v7dR44c6d+jR48Q/dmfviNYcnKyfbdu3bpPnjzZNyAgoPvAgQMDCwoKCABWrlzZLjw8PDQ4ODjs/vvv98/Pz6/x725GRobtyJEj/YODg8OCg4Mrncnm5uaqBgwYEBQWFhYaFBR0u4NXXl6eaujQoQHBwcFhgYGB3T/66CN3AHjyySe9/P39uwcFBYXNmjXLG/jfVYFNmza5rV27tsO6des8+/XrFwTceQXgpZde6hAeHh4aFBQUtmDBgs6A0v/dz88vfNy4cX5BQUHdz549e0cTl3Xr1rV94IEHbtZ0jPHx8Q6DBw8O7N69e2jfvn2D9f3lY2Ji/KZNm9alZ8+eId7e3hG7du1ynThxol+3bt26x8TE+Om3nzZtWpfw8PDQgICA7vq4Ktq+fXvrXr16hYSFhYWOGjWqm77We1Xvh6urq9bb27vkwIEDdT6rtxSSzJtYWRmQkSHJ3OpI6VaT02qBefPgW1JS9d+lkhKo5s+Hb30uuZ85c8apZ8+edzRV8fDw0Hbq1Kk0ISHBAQDi4+Odv/vuu7NHjx5NNlxv+fLlnm3atCk/e/Zs/GuvvXYpISGhysvEFy5ccJw3b97VtLS0eDc3t/L169e7A8C0adNuxMXFJSYnJycEBwcXrVq1qsqSoXpz5szpMnjw4Pzk5OSE+Pj4hD59+hQbPu/s7KzdvXt3WkJCQuKhQ4dS/v73v3trtVps3769dceOHcuSk5MTUlNT48ePH5+XlZVl8/3337unpqbGp6SkJLz22mt3VJybNGlSrr4T25EjR1IMn9u+fXvrtLQ0x9OnTycmJiYmnDx50nnPnj0uumN1mDt3bnZaWlp8UFDQHdXljhw54tK/f/873ushQ4YEhYSEhPXo0SMEAGbOnOn7wQcfXIiPj09cvnz5xSeeeKKLft3c3FzbEydOJL3xxhsZkydPDli8ePGV1NTU+KSkJCd9mde33377UlxcXGJSUlL8L7/84nrkyJE7yrpmZmbavvbaa50OHz6ckpCQkNinT5/CV199tUNN70efPn1uHTx40Govucll9iaWkaH80ZLBb1ZGSrea3IEDaFVQgBrvP+fnw+bgQbQaPhy1lk6tq8GDB+dV1Tns119/dZk/f/5VALjrrruKg4KCquy05uXlVaLvFNa7d+/C9PR0BwA4duyY08svv+yVn59vc+vWLZshQ4bUeKvg119/dd26dasaUK4gtG3b9o6YtFotPf30096///67i0qlwtWrV+0vXrxo26dPn6IXXnjB54knnvB68MEHc6OjowvKysrg4OCgnTRpkt/o0aNvTpo0yejbFHv37m19+PDh1mFhYWEAUFhYqEpKSnLs1q1baadOnUpHjBhR5b9Bdna2XceOHcsMlxleZs/NzVWdOHHCZeLEif7650tLS2+PXfi///u/myqVCn369Cls27Ztmb6WelBQUNHZs2cd7r777qLPP//cY926de00Gg1lZ2fbnTp1yrFfv363a64fPHiw1dmzZx2joqJCAKXpSd++fQvatm1bXt370b59e01SUpLVjmSSZN7EZFqaFSopAVatAqKjgYgIc0fTbF26BDsi1Fhfmgh88SLqPI0gPDy86Ntvv72jEUpOTo4qMzPTPiwsrOTIkSPOzs7ODRpmZ29vfzt2GxsbLioqUgHArFmzum7dujVtwIABRatWrWp76NChBp39rVmzxuP69eu2Z86cSXRwcGAvL6+IoqIiVY8ePUqOHz+esG3bNreXXnrJa//+/XkrVqzIPHnyZOKOHTtab9261f3DDz9s//vvv6fU/ipKp7Cnn346c/HixXeU4U1OTrav6b1ycHDQ6o+9KuXl5XB1ddUkJSVV2QnT0dGRAcDGxuaO91SlUkGj0VBSUpL9e++91+HYsWOJnp6e5TExMX4Vu9UxMwYNGpS3c+dOdcX9V/d+FBcXq5ycnJqkXakpyGX2JibJ3ApJ6dYm4eWFMq0WNVbhYQZ5e6OspnWqMmbMmPzi4mKVfgS3RqPBk08+6TNx4sRrhq1HqzJgwICCjRs3ugPAsWPHHFN
SUqrs6FWdwsJCVZcuXcpKSkpo48aNtTZ2GThwYP7y5cs99XFev379jqsVubm5Nu3atStzcHDgnTt3ul6+fNkeUEbgu7q6ap988smchQsXZp08edI5NzdXpevXnbt69eqMpKQko+8Jjxo1Ku+LL75op7/XrFar7S5dulTrCWBgYGCVXcv0PDw8tN7e3qWffvqpO6AMovvtt9+Mfk9v3Lhh4+TkpPXw8CjPyMiwPXjwYKVxFEOHDr0VGxvrEhcX5wAo4wlOnz7tUNP7kZKS4hAeHm50D3ZLI2fmTUytVsZQ+fiYOxJhFCnd2mSGDcMtV1eUFxVVf5Lh6oryoUPrfoldpVLh22+/TZs1a5bv8uXLO2m1WgwfPjx31apVl2rbdvHixdkPPfSQn7+/f3d/f//igICAYnd398oNs6vx/PPPX46Kigr18PDQ9OnTp6CgoKDGWwkffvjhhUcffdQ3KCionUqlwnvvvXf+3nvvvX3MM2fOzBk1alRAUFBQWI8ePQq7du1aDCiX8//2t795q1Qq2Nra8gcffHD+5s2bNqNHjw4oKSkhAHj11VczjI17/PjxefHx8Y533XVXCKDcq9+wYYPa1ta2xqsno0aNuvnf//7XdezYsVW2dwWAr7/++txf/vIX3zfffLOTRqOhcePG5QwYMMCoRDpgwICi8PDwQn9///BOnTqV9u3bt6DiOp07d9asWbMmffLkyd30l/CXLFlyyc3NTVvd+3H06FGXN99887IxMVgi6ZrWxKZOBX777X9n6MLC7d4NjB6tlG6dOtXc0Zidqbum6UezVzUIzsEB2s8/x7n6Tk+rL41Gg9LSUnJ2dub4+HiH++67L+js2bNx+svB4k4FBQU0cODA4GPHjiXZ2lrH+eIvv/zitHz58o7ffvutxf9lrq5rmnW8082ITEuzMlK6tUnpEvU5w3nmzCBX14bNM2+I/Px81eDBg4PLysqImfGvf/3rvCTy6rm4uPDLL798Wa1W2wcGBlbqo26Jrl69avfmm2/WepXGkkkyb2JqNfCnP5k7CmEUfenWt9+W0q1NaNIk5E6ciNMHD6LVxYuw8/ZG2dChuKUy0wgfd3d3bVxcXKJ5Xt06xcTE5Jk7hroYN26cVcVbFUnmTaioCMjKkmlpVmPlSindaiYqFWCK6WdCNFcm/axLRNFElExEaUT0fDXrPERECUQUT0RfGSyfTkSpuq/ppoyzqaSnK9/lMrsVUKuBLVuA2bOldKsQwuKZ7MyciGwAvA9gJICLAI4S0Q5mTjBYJxDA3wAMZOYbRNRet9wDwBIAkQAYwDHdtmZrON8YZFqaFZHSrUIIK2LKM/MoAGnMfI6ZSwFsBPBghXX+AuB9fZJm5qu65fcD+JGZc3TP/Qgg2oSxNglJ5lZCSrcKIayMKZO5FwDDOY0XdcsMBQEIIqJfiOh3Ioquw7YgollEFEtEsdnZ2RWftjhqNeDgAHTsaO5IRI2kdKv5KT1QW2H9+jb4z39aoRF6oEoL1P9p6haoffv2DTZ8PiQkJEzfyKY6hs1u6uruu+8Oys7Orte/i7UydwU4WwCBAIYCmALgIyJqY+zGzLyWmSOZOdLT09M0ETai9HRl8Ju5RuUKI0jpVvPbtMkNnTv3wJgxQXjyST888EAQOnfugU2b6tUxDfhfC9QxY8bcPH/+fJxarY67deuWav78+ZVOEuraArVdu3ZGF5CxFM8++2z23LlzrwP/a4GamJiY0L1795K6tkAtL7/z8KtqgXrr1i2btLQ0OwA4fvy4yeufT5ky5fqKFSssPyk0IlOmlUsADOuceeuWGboIYAczlzGzGkAKlORuzLZWR/qYWwEp3Wpemza5Yfr0brhyxQ6FhSrcumWDoiIVrlyxw/Tp3eqb0KUFqnlboI4dOzZn/fr1HgCwfv16j5iYmBz9c8nJyfZ9+/YNDgsLCw0LCwuteLyAUrhn9uzZ3vpYli9f3g4Azp8/bxcZGRmsP9Pfu3evCwBMnjz55vbt29vW9D43N6ZM5kcBBBJRVyKyBzAZwI4K63wL5awcRNQOymX3cwD2AbiPiNyJyB3AfbplVk2SuYWT0q3mpfRA9UVJSdV/l0pKVJg/37c+l9ylBap5W6BOmTLlxs6dO90BYN++fW3Gjx9/U/9c586dNT/99FNKQkJC4qZNm84tWLCgCyp455132rm5uZXHxcUlnjp1KvHzzz/3TEpKsv/00089RowYkZuUlJSQmJgY369fv0IA8PT0LC8tLaWsrKwWc6ndZKPZmVlDRHOhJGEbAJ8yczwRLQUQy8w78L+knQCgHMBiZr4OAET0KpQPBACwlJlzKr+K9cjNBW7ckDnmFm3PHiAxUSndSjX2+xCmcOBAK9RStxz5+TY4eLAVhg+XFqhW1AK1ffv25W5ubpq1a9e6BwQEFLm4uNz+RFZaWkqPP/64b0JCgpNKpcL58+crNWnZv39/66SkJOcdO3a4A0B+fr5NQkKCY//+/W/Nnj3br6ysTDVhwoQb+vcfANq2bau5cOGCfceOHa22eUpdmPTuLTN/z8xBzOzPzMt0y17WJXKwYiEzhzFzBDNvNNj2U2YO0H19Zso4m4KMZLcCUrrVvC5dsgNRzWVSiRgXL9arBeqpU6fu6Bhm2AIVUM5467pfQxVboGo0GgKUFqjvvffehZSUlITnnnvuckl1Vx6MZNgCNSkpKaFt27Zlhi1QIyIiil566SWvRYsWdbKzs8PJkycTJ0yYcGPXrl1thg4dGmjs6+hboCYlJSUkJSUlXLhwIW7BggXXgJrfq+paoE6YMOHGs88+6ztlypQ7TsyWLVvWoX379mWJiYkJZ86cSSgrK6u0LTPTypUrL+hjuXTp0pnx48fnjRo1quDw4cPJXl5epTNmzOiqH9QHACUlJdTQf1NrIkOxmogkcwunL926YIGUbjUXL68yaLU1XxJhJnh7SwtUK2yBOm3atBtPPfVU1vjx4+8onZqbm2vTqVOnMhsbG3zwwQdtKw6oA4CRI0fmfvjhh576bmenT592yMvLU6WkpNh7e3uXPfPMM9ceeeSR7OPHjzsDysC87Oxsu+Dg4BJjj9faSTnXJiLJ3MJJ6VbzGzbsFlxdy1HFWd1trq7lGDpUWqBaYQtUd3d37bJly7Iqrv/0009fjYmJ8d+4cWPb4cOH5zo5OVX6cLVgwYJr6enpDhEREaHMTB4eHmXff//92X379rmuWrWqo62tLTs7O5dv2LBBDQA///yzc+/evW/ZtaAP5tICtYnMmwesW6fcO5fbsRZGrQYCApQR7G++ae5oLJqpW6DeHs1e1aVoBwctPv/8HOpw37cxSAvUurGEFqiPPfaYz9ixY28++OCD1fZUt1bSAtXM9CPZJZFbICndajmURH0O8+f7Ij/fBkQMZoKrazn+/e/zTZ3IAWmBWleW0AI1PDy8qDkm8ppIMm8i+pM/YWGkdKvlmTQpFxMnnsbBg61w8aIdvL3LMHToLXNVW5IWqHVn7haozzzzTO1XgZoZSeZNgFlJ5vfea+5IRCVSutUyKT1QpQWqEEaS0exNIDtbyR
cy+M3CSOlWi6fRmDsCIayDJPMmICPZLZSUbrVoJ07A0cMDvU6dQqVpTkKIO0kybwKSzC2QlG61aFot8Nhj8CsogM2jj8KvEZqmCdGsSTJvAunpyndJ5hZEX7p18WKZYmCBPv8c7ikpcGIGkpPhvH492jR0nzY2Nn31DTlGjRrVrbaGJxUlJyfbr169utaiL9YqKioqWN9Apja7du1yHTZsWKMN6Z00aZLvsWPHHAHg008/de/WrVv3fv36BR0+fNj50Ucf9alt+5oUFBTQXXfdFazRaJCcnGzv6OjYJyQkJEz/VV0b3MZQ37a1o0eP7nbmzJk6XZGSZN4E1GqgXTvAxcXckYjbpHSrxcrNhWrBAnQpKlL+PhUVQfX00/DNy2vY3ysHBwdtUlJSQmpqarydnR2vXLmyTi0yU1NTHTZt2tSgZF5WVufidS3Cpk2bzvft27cYAD777LN2H3744fkjR46k3HPPPYXr1q0zutBNVe/vu+++227MmDE39HPefXx8SvRlYZOSkhIscZrhE088cXXZsmUd67KNJPMmIN3SLIyUbrVoixahc3HxnX+biouheuYZdG6s1xg0aFBBWlqaw5UrV2zuvfde/6CgoLCePXuGHDlyxAkAdu/e7aI/cwsNDQ27ceOG6oUXXvCKjY11CQkJCXvllVfaV9xnVFRU8GOPPeajP/s/cOCAM6C0Gx07dmzXPn36hIwfP75rcnKyff/+/YOCgoLCBgwYEJSammoPVN/69IMPPvCIiIgIDQkJCZs6daqvRqOBRqNBTEyMX2BgYPegoKDb8fzzn/9sr293Onr06G6A0hp14sSJfhEREaGhoaG3W6YWFBTQ6NGju3Xr1q37yJEj/as7Qz106JBz7969Q4KDg8MiIiJCb9y4cce/zYEDB5x79eoVEhoaGta7d++QU6dOOQBKX3N93EFBQWFnzpxxqK5Nq/6qwKJFizodO3bMZfbs2X6zZ8/2NrwCUN1xVGxfWzH+zZs3t33ooYdu1vT7sH379ta9evUKCQsLCx01alQ3fQlbLy+viKeeesorJCQkLDw8PPTnn392HjRoUKCPj0/4W2+95QlU35K2oqrayVb3fkRHRxf89NNPrevy4a/GqWlEZANgPzMPM3qPohK1Guhdr4stwiSkdKvFOnECjl98gfYlJbgjsZSUQPXFF2g/dy6ye/ZEg+ptl5WVYd++fa3vu+++vGeffbZzz549C/fv3392x44drtOnT++alJSUsHLlyo6rVq06f999993Kzc1VOTs7a5ctW3Zp5cqVHQ4cOJBW3b6LiopUSUlJCXv27HGZNWtW19TU1HgASE1NdTxy5EiSi4sLDx8+PGDatGnX//rXv15/55132j7xxBM++/fvP6tvffryyy+f1Wg0yM3NtTl+/Ljj1q1bPWJjY5McHBz4z3/+c5fVq1e37dmzZ1FmZqadfv/Xrl2zAYBVq1Z1PH/+/BknJyfWL/v73//eadiwYXlbtmxJv3btmk1kZGTomDFj8t5++21PJycn7blz5+KPHDniNHDgwLCKx1NcXEzTpk3z37Bhw9khQ4YU5uTkqAw7ngFAz549i48ePZpkZ2eHb7/91vXZZ5/13rdv39l3333X88knn7zyxBNP5BQXF5NGo8HWrVvdOnbsWHbw4ME0AJXqzq9YsSLz8OHDrVesWJFxzz33FO7atctV/1x1xwEo7WtPnz4dX7HrXXFxMWVkZDgEBwffLl6TkZHhEBISEgYAd911V8Fbb711+bXXXut0+PDhlNatW2tfeOGFjq+++mqHFStWZAJAly5dSpOSkhIef/xxnxkzZvgdOXIkqaioSBUREdH92Wefzda3pPXw8NBmZmba9uvXL2Tq1Kk3VQZ1EQzbyTIz7r333oA9e/a4XLlyxbaq98PGxga+vr7Fv//+u/PgwYOr7NJXUY3JnJnLiUhLRG7M3OSVl5qD8nLg/Hlg/HhzRyIAKJ+stmxRRrC7uta+vmgy+kFv1Z2MlJUBjz4Kv2PHkFyf+jElJSUq/R/xfv365c+fP/9a7969Q7dt25YGKM1YZs2aZZuTk6Pq379/waJFi3weeuihnClTptzw9/c3agje1KlTcwBg1KhRBQUFBSp9Qo2Ojr7p4uLCAHDixIlWe/bsOQsATzzxRM4rr7ziDVTd+nTNmjUecXFxzj179gwFgOLiYlX79u01kyZNupmRkeEwffp0nwceeCB33LhxeQAQHBxcNG7cuK5jxoy5OW3atJsAcPDgwdb79u1rs2rVqo6694HS0tLsf/75Z5d58+Zd1b0fRVW1dj19+rRj+/bty4YMGVIIKD3gK66ja+TSNT093ZGIuKysjABgwIABt1asWNHp4sWL9pMnT74RERFRUlWbVqP+8Wo4DqD69rVZWVm2rq6ud0xw1F9m1z/++uuv3c6ePesYFRUVAgBlZWXUt2/f23Hpz+ojIiIKb926pXJ3d9e6u7tr7e3ttdeuXbNxdXXVVtWStkuXLrdft7p2siNGjMiv7v1o166dJiMjw+hLh8YUjSkAcIaIfgRwu4gDM0vtSyNcvqz8EZLL7BZCSrdarIQEOMTFoVV1I9e1WtCZM3BJTIRD9+51PzvX3zM3Zt3XXnsta+zYsbnfffed2+DBg0N2796dWnGdCRMm+MXFxTl36NCh9NChQ2kAQBUGU+oft2rVql7j8ZmZJk6ceP3999+v1BAmLi4u4Ztvvmm9evVqz02bNnls2bIl/cCBA6l79uxx/e6779xWrFjRKTk5OZ6ZsXXr1rSePXuapIPYc8895zVkyJD8H3/88WxycrL98OHDgwFgzpw5OYMHD771zTffuI0ePTrw3XffPT9mzJj848ePJ2zbts3tpZde8tq/f3+e/gy4NtUdx88//9yqulanrVq10paWltb40Y+ZMWjQoLydO3eqq3pef09dpVLd0eZWpVKhrKyMDFvSOjg4sJeXV0TFFrD6drKLFy+uVJmuuvejpKREVZcWrsZ8vt0O4CUAhwEcM/gSRpBpaRZESrdatLAwlISH45ZKhSoHJKlU4IgIFISGNuwyu6F+/frlf/bZZ20BZYS2u7u7xsPDQxsfH+8QFRVVtGzZsqwePXrciouLc3Rzcys37Hi2devW9KSkpAR9IgeAr7/+2h0A9u3b5+Lq6lretm3bSmeLvXv3vvXxxx+7A0pv8sjIyAKg6tan0dHRebt27XLXtx69cuWKTUpKin1mZqZteXk5Hn300Zuvv/76pTNnzjiXl5fj7Nmz9g888ED++++/f6mgoMAmNzfXZtiwYXkrV67soNV9Svrll1+cAGXcwIYNGzwA4OjRo44pKSmVRrL36NGj+OrVq3aHDh1yBoAbN26oKt7HzcvLs/H29i7VHU87/fKEhAT70NDQkhdffPHq/ffff/PkyZNOVbVpNfbfqrrjqImnp2d5eXk5FRYWVjtifejQobdiY2Nd4uLiHHTHozp9+rTRI8mra0lrqLp2sjW9H2q12qFPnz5FxsZR65k5M39u7M5EZTItzYJI6VaLplIBn32G9AEDEFZSRbq2swPWr
UN6Y5Zof/PNNy9PmzbNLygoKMzJyUm7bt06NQC89dZb7X/99dfWRMTBwcFFEyZMyFWpVLCxseHg4OCwqVOnXluyZMnVivtzdHTk0NDQMI1GQ2vXrq3yTG/16tUXHnnkEb9///vfHdu2batZv359OlB969MXX3zx0ogRI4K0Wi3s7Ox41apVF5ydnbWPP/64n1bX/33p0qUXNRoNTZ06tWt+fr4NM9PMmTOvtmvXrvyNN964PGvWrC4hISFhWq2WfHx8Sg4cOJC2aNGiq5MnT+7arVu37gEBAcVhYWGVyuc6Ojryhg0bzs6bN69LcXGxytHRUXv48OEUw3Wee+65rJkzZ3Z98803O48cOfKmfvmXX37psXnz5ra2trbs6elZ9uqrr2b+/PPPrSq2aTX236q646htu3vuuSf3hx9+cKnYklWvc+fOmjVr1qRPnjy5W2lpKQHAkiVLLvXo0cOoD43VtaQ1VF072aSkJIeq3o+MjAxbBwcHNrxUX5taW6AS0UAA/wDgCyX5EwBm5m7GvkhTsNQWqK+8onwVFQEOUsfKfEpKAF9fZSTinj3mjsZqmbwFKoC//AXeX3wBz5KS/105dHCA9uGHkf3RR7hY19duKlFRUcH6gVvmjkX8z88//+y8YsWKDt9++22VH64s0SuvvNK+devW2gULFlT6P1NdC1RjPuN+AuBtAIMA3AUgUve9VkQUTUTJRJRGRM9X8fyjRJRNRCd1XzMNnis3WL7DmNezRGo10LmzJHKzk9KtVmPlSlx2dMQd9wodHaFduRKXzRWTsF6DBg0qHDp0aJ7Gigr9t2nTpnzu3Ll16vxmzAC4XGau86mMblrb+wBGArgI4CgR7WDmigNQNjHz3Cp2UcTMver6upZGrQb8/MwdRQsnpVutSuvW0P7rX7jw1FPwKyqCyskJ2nfewfnWrWHRRV3/+OOPZHPHIKr29NNPXzd3DHUxf/78OsdrzJn5ASJaTkQDiKiP/suI7aIApDHzOWYuBbARwIN1DdDaScEYCyClW63O9Om4ERSEIiIgOBiFjzyCm+aOSQhLZsyZeT/dd8P7ZAygtlMcLwCGZfguGuzLUAwR3QMgBcACZtZv40hEsQA0AN5g5m8rbkhEswDMAoAuXbrUEk7TKy0FLl6UZG52UrrV6ugHww27RxOybp1tow56E6I5qva/CBHN1/34EjMPq/DVWNcqdwLwY+YeAH4EYDhy3lc30GYqgHeIyL/ixsy8lpkjmTnS07NOZZabxIULALMkc7OS0q1WqzdO4AZ5oCdOmTsUISxeTZ93H9N9X1XPfV8CYNjtxlu37DZmvs7M+uH/HwPoa/DcJd33cwAOArC6gqgyLc0CSOlW66SUg/OjggIbPPqoH6QHqhA1qimZJxJRKoBgIjpt8HWGiE4bse+jAAKJqCsR2QOYDOCOUelE1Mng4RgAibrl7kTkoPu5HYCBAIyq3GRJpGCMmelLt86eLaVbrc3nn7sjJcUJSg9UZ6xf36ahu5QWqDVrKS1QiajvvHnzbjftyczMtLW1te3zyCOP1HivdtWqVW1rW6cqly9fth08eHBgfWKvi2p/mZl5CoDBANIAPGDwNVr3vUbMrAEwF8A+KEl6MzPHE9FSIhqjW20eEcUT0SkA8wA8qlseCiBWt/wAlHvmVpnMbW0Bb29zR9JCSelW65Sbq8KCBV2gL4lZVKTC00/7Ii9PWqA2U03ZAtXLy6v0xx9/bKN/fv369e4BAQGVCr00ls6dO2s6dOhQ9sMPP7Qy1WsAtYxmZ+YsZu7JzOcrfhmzc2b+npmDmNmfmZfplr3MzDt0P/+NmbvrXmMYMyfplv/KzBG65RHM/ElDD9Qc1GqgSxcln4gmJqVbrdeiRZ1RXHzn36biYhWeeUZaoEoL1Aa3QHVyctIGBAQU6a9CbNu2zWPs2LE5+ue/+uortx49eoSEhoaG3X333UEZGRmVBopfvnzZ9v777/cPDw8PDQ8PD9Un6qp+bwBg7NixN9evX9+2qve3sRgzml3Uk8wxNyMp3WqdTpxwxBdftEdJyZ2JpaREhS++aI+5c7PRwIYh0gK1ZbdABYDJkyfnfPnllx6dO3cus7Gx4c6dO5fpa6qPHDmyYPLkyUkqlQpvv/12u6VLl3b86KOP7qg8OHv2bJ+FCxdeuf/++wtSU1Pt77///sBz587FV/V7AwADBw68tXTp0kb7MFoVSeYmpFYDD9R6Q0I0upISYNUqIDoaiIgwdzTCWLpBb6i5B6ofjh1LRj3mqkkLVGmBqhcTE5O3dOlSrw4dOpTFxMTkGD6nVqvtx44d652dnW1XWlqq8vHxqfTh8Zdffmmdmpp6u9GLrqlNtb83nTt31ly9erVSA5bGJLM3TeTWLeDqVRn8ZhZSutU6JSQ4IC6uFXTNQyrRaglnzrggMbFexZH198yTkpISPv/88wx9a8uqvPbaa1kff/zx+aKiItXgwYNDTpw44VhxnQkTJviFhISEDRky5PZAMFO1QNXHnZ6eHvf2229f9vT0LI+Li0sYNmxY/urVqz0nT57sBwAHDhxIfeqpp7KPHz/u3Lt379CysrLbrUP1+8jMzDzTp0+fRrtHrG+BmpqaGr9z5840fcvROXPm5Hz33XdpTk5O2tGjRwfu2LHDtUePHiXHjx9PiIiIKHrppZe8Fi1a1Km2/Ru8F9UeR11boDo6OnKPHj0KP/zww44PP/zwDcPn5s6d2+XJJ5+8mpKSkvDee++dLykpqbQ9M+P48eOJ+liuXr162s3NTVvd701hYSE5ODiYdEpGTfPMdxLRjuq+TBlUc3BeN6pAknkTk9Kt1issrATh4begUlWdZFUqRkREAUJDpQWqtEBtcAvU5557LmvJkiUXK57R5+fn23Tp0qUMANatW1flfe5Bgwblvf7667fHTfz6669OAFDV7w0AxMXFOQYFBRndzrQ+arrMvkL3fTyAjgC+1D2eAuCKKYNqDmRampnoS7du2CClW62NUvYtHQMGhKH6Hqjp9bnEXh1pgdpyW6BGRkYWR0ZGVrpC8cILL1yeMmWKv5ubm2bQoEH5Fy5cqHQlaO3atRkzZ87sEhQUFFZeXk79+vXLv/vuuy9U9XsDAD/++KNrdHR0rrHHWh/GtECNrdjysKpl5mZpLVDfew/461+BzEygY0dzR9OCDB0KnDsHnD0rFd9MoClaoOIvf/HGF194wvDypoODFg8/nI0KA5EsibRAtUyW0AI1MjIyeM+ePWmenp6VrtTUVUNaoLYiotu9y4moKwCTzpdrDtRqwNER6NDB3JG0IFK6tXlYufIyHB3vvL/o6KjFypXSAlXUmblboF6+fNl2/vz5VxojkdfEmNHsCwAcJKJzAAiAL4DZpgyqOdBPS5MrvU1ISrc2D61ba/Gvf13AU0/5oahIBScnLd555zxat7bomq7SAtVymbMFaufOnTUPP/zwTVO/Tq1n5sy8F0AggPlQqrQFM/M+Uwdm7aT1aROT0q2WTqutbpR6VaZPv4GgoCIoPVAL8cgjN00XmhDWQfd/qMoPtbUmcyJyBrAYwFxm
PgWgCxGNbtwQmx9J5k1MSrdaurjs7Gw3oxO6fjCci0t5Yw96E8IaabVays7OdgMQV9Xzxlxm/wzAMQADdI8vAdgCYFejRNgM3bwJ5OZKMm8yOTnAJ59I6VYLptFoZmZlZX2clZUVDmPrW6hUwMGDFwG44tQpudwiWjotgDiNRlPlfURjkrk/M08ioikAwMyFVLEygriDTEtrYqtXK1V6pHSrxerbt+9VKJ0RhRAmYMwn5FIicgLAAEBE/gAarWhDcyTJvAlJ6VYhhDDqzPwfAPYC8CGiDVB6iz9qwpisWnm5JHOTKS+v3IJOSrcKIYRRo9l/gFIF7lEAXwOIZOaDpg3LOiUlAe3aAcePA61bA23amDuiZkT/5iYbzP6R0q1CCAHAuNHs/wHQj5l3M/MuZr5GRGubIDarwgzMmAHk5QHff6+clcvIgkZi+ObOmKE8Bv5XunXxYnmzhRAtmjH3zLsCeI6Ilhgss6hSrpZg61bgxAnlZDE3F7A3abO7FmbbNuD0aeXNPXUK2L5dWb58OeDjA0ycaN74hBDCzIxJ5jcBjADQQddJzc20IVmfbduAyZOBYl3Jfmalsqg+54gGKCgA5sxRRqsDyvfZs4HDh6V0qxBC6BiTzImZNcz8JIBtAH4G0L6WbZQNiaKJKJmI0ojo+Sqef5SIsonopO5rpsFz04koVfc13dgDamrffw9MmqScNFb00EPK86IBliwBCiv0rSgsBGbNktKtQgihY8xo9tX6H5h5HRGdAfBUbRsRkQ2A9wGMBHARwFEi2sHMCRVW3cTMcyts6wFgCZTL+QzgmG7bG7AgzMBjjymDrKtSXq7c4s3MlFu69ZKUBHz4IVBUoQ1wUZEyEG7mTCndKoQQqCGZE1FrZs4DsEWXXPXUAIyZBxQFII2Zz+n2txHAgwAqJvOq3A/gR2bO0W37I4BoKKPpq3T9+nWsW7fujmXdu3fHXXfdhbKyMmzYsKHSNr169UKvXr1QWFiIzZs3V3o+MjIS4eHhyM3NxTfffFPpeXf3Abh2LRht217DAw9ULoh3+PA9SE/vhh9/zMLly3srPT9ixAj4+PggIyMD//nPfyo9Hx0djY4dO+LcuXM4fPhwpedHjx6Ndu3aITk5Gb/99lul58eNGwc3NzfExcWhqvawDz30EJydnXHy5EmcPHmy0vPTpk2DnZ0djh49ivj4+ErPP/roowCAX3/9FSkpd7Q4hp2dHaZNmwYAOHToENTqO7sPOjs746GHHgIA7N+/Hxcv3tnZsnXr1hi/YgVQUoK90dHIqtBHtu3163jgxAmAGTt37cL163f2UejYsSOio6MBANu3b0deXt4dz3t7e+Pee+8FAGzevBmFFc7+u3btiiFDhgAANmzYgLKysjueDwoKwt133w0AlX7vANP/7g0YMADBwcG4du0adu2q/Lt3zz33oFu3bsjKysLevY3/uyeEsCw1XWb/Svf9GIBY3fdjBo9r4wUgw+DxRd2yimKI6DQRbSUin7psS0SziCiWiGIr/rFtCufOVX153ZBWq5xgijrKz1cGHtT0Bp86pfwjCCFEC0esn+bT2DsmmgAgmpln6h4/DGWK21yDddoCKGDmEiKaDWASMw8nokUAHJn5n7r1XgJQxMwrqnu9yMhIrurs05R++w0YNKjmfKNSAb/8AvTv33RxNQvMwMCBwJEjVb/BRMCAAcDPP8s9jCZERMeYWWazCGFharrM3qemDZn5eC37vgTAx+Cxt26Z4T4Mr41+DOAtg22HVtj2YC2v1+T691fqmFy9Wv06np5Av35NF1OzQQR8+inQp0/le+YA4OioPC+JXAghahwAt7KG5xhAbSW3jgIIJKKuUJLzZABTDVcgok7MnKl7OAZAou7nfQBeIyJ33eP7APytltdrckTAZ58BY8ZUPQjOxkbyTYOEhABPPFF5EJy9vbI8ONh8sQkhhAUx2WV2ACCiPwF4B4ANgE+ZeRkRLQUQy8w7iOh1KElcAyAHwBPMnKTbdgaAv+t2tYyZP6vptcxxmV1v+3albonh1WCVCtiyBRg/3iwhNR+3bgG+voDhADcPD+DCBaBVK/PF1ULJZXYhLJNRyZyIwgGEAXDUL2Pm9SaMq87MmcwBJXE//LDSxMvWFvj6a2DCBLOF07xs3Qr8+c/Km+vgAGzYAMTEmDuqFkmSuRCWyZja7EsAvKv7Ggblvrb0Ja5gwgTA31/5OShIck2jiolRkjgA9O4tlzuEEKICYyrATYBSzjWLmR8D0BOAlHStgEg5eQSAf/1L7pM3qthYpcmKoyOwbp28uUIIUYExFeCKmFlLRBoiag3gKu4cpS50iouVPKOrNSIay8qVSunW9HTpKyuEEFUwJpnHElEbAB9BKRhTAKByuTEBtRrw8vrfFWHRCNRqZUDCokWSyIUQoho1JnMiIgCvM/NNAKuJaC+A1sx8uimCszbp6Uofc9GI3nlHmeM3b565IxFCCItV4z1zVoa6f2/wOF0SefXUaknmjSonB/jkE2DqVOWShxBCiCoZMwDuOBHdZfJIrFxJCXDpkiTzRrV6tTLP/JlnzB2JEEJYNGPumfcDMI2IzgO4BYCgnLT3MGlkVubCBaWcuCTzRlJSAqxaBURHAxER5o5GCCEsmjHJ/H6TR9EM6Dt8SjJvJF9+CVy5ogx8E0IIUaNaL7Mz83koU9GG634uNGa7lkafzP38zBpG86DVKtPRevUChtfWAkAIIUStZ+a6CnCRAIIBfAbADsCXAAaaNjTrolYDdnYyTqtR7NkDJCYqZVulQIwQQtTKmDPscVDKt94CAGa+DMDVlEFZo/R0oEsXZRaVaKDlywEfH6V7jRBCiFoZk8xLdVPUGACISFpVVUGmpTWSo0eBQ4eAp59WLnUIIYSolTHJfDMRrQHQhoj+AmA/lGpwwoAk80aiL936l7+YOxIhhLAatd4zZ+YVRDQSQB6U++YvM/OPJo/MihQUANnZkswbzLB0q6vcyRFCCGMZMzUNuuQtCbwa6enKd0nmDSSlW4UQol6M6Wc+nohSiSiXiPKIKJ+I8poiOGsh09IagZRuFUKIejPmzPwtAA8wc6Kpg7FWUjCmEUjpViGEqDdjBsBdqW8iJ6JoIkomojQier6G9WKIiIkoUvfYj4iKiOik7mt1fV6/qaSnA87OQPv25o7ESknpViGEaBBj+5lvAvAtgBL9QmbeXtNGRGQD4H0AIwFcBHCUiHYwc0KF9VwBzAdwpMIuzjJzLyPiMzu1WrnELvVN6mnDBindKoQQDWBMMm8NpYTrfQbLGECNyRxAFIA0Zj4HAES0EcCDABIqrPcqgDcBLDYmYEsk09IaQKsFVqyQ0q1CCNEAxkxNe6ye+/YCkGHw+CKUDmy3EVEfAD7MvJuIKibzrkR0AsqUuBeZ+aeKL0BEswDMAoAuXbrUM8yGYVaS+eDBZnl56yelW4UQosGMGc0eRET/IaI43eMeRPRiQ1+YiFQA3gZQ1YinTABdmLk3gIUAviKi1hVXYua1zBzJzJGenp4NDalebtwA8vLkzLzepHSrEEI0mDED4D4C8Dc
AZQDAzKcBTDZiu0tQuq3peeuW6bkCCAdwkIjSAfQHsIOIIpm5hJmv617vGICzAIKMeM0mJyPZG0BKtwohRKMwJpk7M/MfFZZpjNjuKIBAIupKRPZQPgDs0D/JzLnM3I6Z/ZjZD8DvAMYwcywReeoG0IGIugEIBHDOiNdscjLHvAGkdKsQQjQKY5L5NSLyx/8arUyAchm8RsysATAXwD4AiQA2M3M8ES0lojG1bH4PgNNEdBLAVgBzmDnHiFibnFR/qyd96dbZs6V0qxBCNJAxo9mfArAWQAgRXQKgBvBnY3bOzN8D+L7CsperWXeowc/bAGwz5jXMTa0G2rRRvkQdSOlWIYRoNMaMZj8H4F5d61MVM+ebPizrIdPS6kFKtwohRKOqNZkT0cIKjwEgF8AxZj5pmrCsh1oNhIWZOworI6VbhRCiURlzzzwSwBwo88a9AMwGEA3gIyJ61oSxWTxm5Z65nJnXgZRuFUKIRmfMPXNvAH2YuQAAiGgJgN1QBqkdg9KIpUXKygKKiyWZ14mUbhVCiEZnzJl5exjUZIcy37wDMxdVWN7iyLS0OpLSrUIIYRLGnJlvAHCEiL7TPX4ASkW2VqhcZ71FkWlpdSSlW4UQwiSMGc3+KhHtATBQt2gOM8fqfp5mssisgJyZ15GUbhVCCJMw5swcuuQdW+uKLYxaDXTooPQyF7XQl25duVJKtwohRCMzKpmLqskc82owA0eOAJmZQKdOQL9+UrpVCCFMSJJ5A6jVQP/+5o7Cwnz/vVKi9eZNQKVSBr25uABXryoj2KV0qxBCNDpJ5vWk0QAXLgBTppg7Egvy/ffAhAlAUdGdywsKlO/duzd9TEII0QIYMzVNVOHiRaC8XAa/3cYMzJpVOZEbeuEFZT0hhBCNSpJ5Pcm0tAqOHAFyc2te5+ZN4I+K3XSFEEI0lCTzetJPS5NkrpOZqdwjr4lKBVy+3DTxCCFECyLJvJ7UaiU3deli7kgsRKdOymC3mmi1QOfOTROPEEK0IJLM60mtBry9Zcr0bf36KVPPatKmDRAV1SThCCFESyLJvJ5kjnkFRMDatYCTU9XPOzkBa9ZIGVchhDABSeb1JMm8Cn/6E7B1q3LJQqVSEnerVsrjrVuV54UQQjQ6kyZzIoomomQiSiOi52tYL4aImIgiDZb9TbddMhHdb8o466q4WBnHJdPSqvCnPwHbtin3x6dPB/7zH2VCviRyIYQwGZMVjSEiGwDvAxgJ4CKAo0S0g5kTKqznCmA+gCMGy8IATAbQHUBnAPuJKIiZy00Vb11cuKB8lzPzarz9tnL/fNUqqfgmhBBNwJRn5lEA0pj5HDOXAtgI4MEq1nsVwJsAig2WPQhgIzOXMLMaQJpufxZBpqXVQK0GtmxRSrpKIhdCiCZhymTuBSDD4PFF3bLbiKgPAB9m3l3XbXXbzyKiWCKKzc7ObpyojSDJvAbvvAPY2ADz5pk7EiGEaDHMNgCOiFQA3gbwTH33wcxrmTmSmSM9PT0bL7haqNWAvb1Mma4kJwf45BNg6lTAq9JnLyGEECZiykYrlwD4GDz21i3TcwUQDuAgKdOVOgLYQURjjNjWrNRqwNe39oJnLc7q1cCtW8Az9f58JoQQoh5MmY6OAggkoq5EZA9lQNsO/ZPMnMvM7ZjZj5n9APwOYAwzx+rWm0xEDkTUFUAgAIsp6i3T0qpQUqIMeIuOBiIizB2NEEK0KCZL5sysATAXwD4AiQA2M3M8ES3VnX3XtG08gM0AEgDsBfCUpYxkBySZV2nDBuDKFaVnuRBCiCZF3ExaUkZGRnJsbKzJXyc/H2jdGnj9deD5amfOtzBaLRAeDjg4AMePS5W3ZoyIjjFzZO1rCiGakinvmTdL0vq0Cnv2AImJytm5JHIhhGhyMoSrjmRaWhWWLwd8fICJE80diRBCtEiSzOtIknkFR48Chw4BTz8tLeSEEMJMJJnXkVqt9A5p187ckViIlSuV0q1/+Yu5IxFCiBZLknkd6Ueyy61hSOlWIYSwEJLM60impRmQ0q1CCGERJJnXAbMyml1an0JKtwohhAWRZF4HOTnKPHM5M4eUbhVCCAsiybwOZCS7jpRuFUIIiyLJvA4kmetI6VYhhLAokszrQJI5lNKtK1YAvXoBw4ebOxohhBCQcq51olYDHh5KbfYWS0q3CiGExZEz8zqQaWlQzsqldKsQQlgUSeZ10OKnpcXGAgcPSulWIYSwMJLMjaTVKsm8RZ+Zr1ghpVuFEMICSTI3UlaWMiOrxSZzKd0qhBAWS5K5kVr8SHYp3SqEEBZLkrmRWnQyl9KtQghh0UyazIkomoiSiSiNiJ6v4vk5RHSGiE4S0c9EFKZb7kdERbrlJ4lotSnjNIY+mbfIAXBSulUIISyayeaZE5ENgPcBjARwEcBRItrBzAkGq33FzKt1648B8DaAaN1zZ5m5l6niqyu1GujUCXB0NHckTUxKtwohhMUz5Zl5FIA0Zj7HzKUANgJ40HAFZs4zeNgKAJswngZpsdPSpHSrEEJYPFMmcy8AGQaPL+qW3YGIniKiswDeAmA4uqorEZ0gokNENNiEcRqlRRaMkdKtQghhFcw+AI6Z32dmfwDPAXhRtzgTQBdm7g1gIYCviKhSEVUimkVEsUQUm52dbbIYNRogI6MFJnN96dbFi6V0qxBCWDBTJvNLAHwMHnvrllVnI4CxAMDMJcx8XffzMQBnAQRV3ICZ1zJzJDNHenp6NlbclWRkAOXlLTCZS+lWIYSwCqZM5kcBBBJRVyKyBzAZwA7DFYgo0ODh/wFI1S331A2gAxF1AxAI4JwJY61Ri5yWJqVbhRDCaphsNDsza4hoLoB9AGwAfMrM8US0FEAsM+8AMJeI7gVQBuAGgOm6ze8BsJSIygBoAcxh5hxTxVqbFpnMpXSrEEJYDZO2QGXm7wF8X2HZywY/z69mu20AtpkytrpQq5XiZz4+ta/bLOhLty5aJKVbhRDCCph9AJw1SE9XErltS+n+LqVbhRDCqkgyN4Ja3YLmmEvpViGEsDqSzI3QouaYS+lWIYSwOpLMa1FUBGRmtpBkLqVbhRDCKkkyr8X588r3FpHMpXSrEEJYJUnmtWgx09KkdKsQQlitljI+u95aTDLXl27dsEFKtwohhJWRM/NapKcDDg5Ax47mjsTEpHSrEEJYLUnmtVCrAV9fQNWc3ykp3SqEEFatOaeoRtEipqVJ6VYhhLBqksxr0eyTub506+zZUrpVCCGslCTzGuTlKQXRmnUyl9KtQghh9SSZ16DZj2SX0q1CCNEsSDKvQbNP5lK6VQghmgVJ5jVIT1e+N8tkLqVbhRCi2ZBkXgO1GnBxATw8zB2JCUjpViGEaDYkmddAP5K92RVEk9KtQgjRrEg51xqo1UC3buaOwgSkdKsQQjQrcmZeDeZmPMdcSrcKIUSzYtJkTkTRRJRMRGlE9HwVz88hojNEdJKIfiaiMIPn/qbbLpmI7jdlnFW5dk0Z6N3skrmUbhVCiGbHZMmciGwAvA9gFIAwAFMMk7XOV8wcwcy9ALwF4G3dtmEAJgPoDi
AawAe6/TWZZjstTUq3CiFEs2PKM/MoAGnMfI6ZSwFsBPCg4QrMnGfwsBUA1v38IICNzFzCzGoAabr9NZlmOS0tPV1KtwohRDNkygFwXgAyDB5fBNCv4kpE9BSAhQDsAeiHVnsB+L3CtpVKlBHRLACzdA9LiCiu4WHfqUePxt5jvbQDcK3R9vbWW8qX+TXucVmW5npsweYOQAhRmdlHszPz+wDeJ6KpAF4EML0O264FsBYAiCiWmSNNE6V5Nddja67HBTTfYyOiWHPHIISozJSX2S8B8DF47K1bVp2NAMbWc1shhBCixTJlMj8KIJCIuhKRPZQBbTsMVyCiQIOH/wcgVffzDgCTiciBiLoCCATwhwljFUIIIayWyS6zM7OGiOYC2AfABsCnzBxPREsBxDLzDgBzieheAGUAbkB3iV233mYACQA0AJ5i5vJaXnKtqY7FAjTXY2uuxwU032NrrsclhFUjZq59LSGEEEJYLKkAJ4QQQlg5SeZCCCGElbO6ZG5EiVgHItqke/4IEfmZIcw6M+K47iGi40SkIaIJ5oixvow4toVElEBEp4noP0Tka44466oh5YotXW3HZrBeDBExETW7aXhCWBOrSuZGloh9HMANZg4A8C8AbzZtlHVn5HFdAPAogK+aNrqGMfLYTgCIZOYeALZCKe1r0RpSrtjSGXlsICJXAPMBHGnaCIUQFVlVMocRJWJ1jz/X/bwVwAgii+/zaUzp23RmPg1Aa44AG8CYYzvAzIW6h79DqStg6RpSrtjSGfP/DABehfJhubgpgxNCVGZtybyqErEVy7zeXoeZNQByAbRtkujqz5jjslZ1PbbHAewxaUSNw6jjIqKniOgslDPzeU0UW0PVemxE1AeADzPvbsrAhBBVs7ZkLpoxIvozgEgAy80dS2Nh5veZ2R/Ac1DKFVs9IlJBuWXwjLljEUIorC2ZG1Pm9fY6RGQLwA3A9SaJrv6ac/lao45NVzzoBQBjmLmkiWJriIaUK7Z0tR2bK4BwAAeJKB1AfwA7ZBCcEOZjbcm81hKxusf6Zi0TAPyXLb8yjjHHZa2MKevbG8AaKIn8qhlirI+GlCu2dDUeGzPnMnM7ZvZjZj8o4xzGMLM0YRHCTKwqmevugetLxCYC2KwvEUtEY3SrfQKgLRGlQWmtWu20GkthzHER0V1EdBHARABriCjefBEbz8h/s+UAXABs0U3jsvgPMkYe11wiiieik1B+F43uCGhORh6bEMKCSDlXIYQQwspZ1Zm5EEIIISqTZC6EEEJYOUnmQgghhJWTZC6EEEJYOUnmQgghhJWTZC6aHBEd1BcYIaLviahNA/c3lIh2VfPc17pubAsa8hpCCGHJbM0dgGh+dI1tiJlrbQrDzH8yYRwdAdyl66Bn7Da2unnWQghhNeTMvIUgopd0/al/1p2tLtIt9yeivUR0jIh+IqIQ3fJ1RLSKiH4lonOGPdSJaDERHdWd8b6iW+an2/96AHEAfIjoQyKK1RVOeaWauNKJqJ2u9/dJ3ZeaiA7onr+PiH4jpZf7FiJy0S2PJqIkIjoOYHw1h/0DAC/dPgfrrgj8W/c4joiidPv6BxF9QUS/APiiMd5vIYRoSpLMWwAiugtADICeUHpUG9bQXgvgr8zcF8AiAB8YPNcJwCAAowG8odvXfQACobTJ7AWgLxHdo1s/EMAHzNydmc8DeIGZIwH0ADCEiHpUFyMzr9b1/b4LSpeut4moHZTmJPcycx8AsQAWEpEjgI8APACgL4CO1ex2DICzzNyLmX/SLXPWvc6TAD41WDdM9zpTqotRCCEslVxmbxkGAviOmYsBFBPRTgDQneXeDaWMqn5dB4PtvtVdKk8gog66Zffpvk7oHrtASeIXAJxn5t8Ntn+IiGZB+T3rBCVhnq4l1n9Dqae/k4hG67b5RRefPYDfAIQAUDNzqu44vgQwy8j34msAYObDRNTa4H79DmYuMnIfQghhUSSZt2wqADd1Z6pVMexeRgbfX2fmNYYrEpEfgFsGj7tCOdO/i5lvENE6AI41BUNEjwLwhVIXXP9aP1Y8Wyai6uI1RsX6xfrHtyquKIQQ1kIus7cMvwB4gIgcdWfjowGAmfMAqIloIqAMXCOinrXsax+AGQb3rr2IqH0V67WGkiBzdWf1o2raKRHpL/P/2WDg3O8ABhJRgG6dVkQUBCAJgB8R+evWq8ul8Um6fQ0CkMvMuXXYVgghLJKcmbcAzHxU14nsNIArAM4A0CexaQA+JKIXAdhB6bt9qoZ9/UBEoQB+0136LgDwZwDlFdY7RUQnoCTeDCgfKGoyF4AHgAO6/cYy80zd2frXRKS//P8iM6foLt/vJqJCAD9B6bFtjGJdXHYAZhi5jRBCWDTpmtZCEJELMxcQkTOAwwBmMfNxc8fVlIjoIIBF0ndbCNHcyJl5y7GWiMKg3Lf+vKUlciGEaM7kzFwIIYSwcjIATgghhLByksyFEEIIKyfJXAghhLByksyFEEIIKyfJXAghhLBy/w/hhGFuEAMUnAAAAABJRU5ErkJggg==", + "text/plain": [ + "
" + ] }, "metadata": { "needs_background": "light" @@ -501,14 +1252,16 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 20, "metadata": {}, "outputs": [ { "data": { - "text/plain": "0.0027891187222710556" + "text/plain": [ + "0.0008138491285430982" + ] }, - "execution_count": 19, + "execution_count": 20, "metadata": {}, "output_type": "execute_result" } @@ -534,9 +1287,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.6.9-final" + "version": "3.7.11" } }, "nbformat": 4, "nbformat_minor": 2 -} \ No newline at end of file +} diff --git a/setup.py b/setup.py index 40a05bdd..024aace7 100644 --- a/setup.py +++ b/setup.py @@ -37,12 +37,12 @@ long_description_content_type='text/markdown', license='Apache License 2.0', packages=[pkg for pkg in find_packages() if pkg.startswith('aif360')], - python_requires='>=3.6', + python_requires='>=3.7', install_requires=[ 'numpy>=1.16', 'scipy>=1.2.0,<1.6.0', 'pandas>=0.24.0', - 'scikit-learn>=0.22.1', + 'scikit-learn>=1.0', 'matplotlib', 'tempeh', ], diff --git a/tests/sklearn/test_datasets.py b/tests/sklearn/test_datasets.py index 22ee3539..f7dad27e 100644 --- a/tests/sklearn/test_datasets.py +++ b/tests/sklearn/test_datasets.py @@ -1,12 +1,20 @@ from functools import partial import numpy as np +from numpy.testing import assert_array_equal import pandas as pd +from pandas.api.types import is_numeric_dtype +from pandas.testing import assert_frame_equal import pytest +from sklearn.compose import make_column_transformer +from sklearn.preprocessing import OneHotEncoder, minmax_scale -from aif360.sklearn.datasets import fetch_adult, fetch_bank, fetch_german -from aif360.sklearn.datasets import standardize_dataset -from aif360.sklearn.datasets import fetch_compas, ColumnAlreadyDroppedWarning +from aif360.datasets import ( + AdultDataset, GermanDataset, CompasDataset, LawSchoolGPADataset, + MEPSDataset19, MEPSDataset20, MEPSDataset21) +from aif360.sklearn.datasets import ( + standardize_dataset, NumericConversionWarning, fetch_adult, fetch_bank, + fetch_german, fetch_compas, fetch_lawschool_gpa, fetch_meps) df = pd.DataFrame([[1, 2, 3, 'a'], [5, 6, 7, 'b'], [np.NaN, 10, 11, 'c']], @@ -36,46 +44,39 @@ def test_multilabel_basic(): assert multilabel.y.shape == (3, 2) assert multilabel.X.shape == (3, 2) -def test_series_input_basic(): - prot_attr = pd.Series(['c', 'b', 'a'], name='Z2') - custom = basic(prot_attr=prot_attr) - assert (custom.X.index.droplevel() == prot_attr).all() - - custom2 = basic(prot_attr=[prot_attr, 'Z']) - ix = pd.DataFrame([['c', 'a'], ['b', 'b'], ['a', 'c']], columns=['Z2', 'Z']) - assert (custom2.X.index.droplevel().to_frame() == ix.to_numpy()).all(None) - - with pytest.raises(TypeError): - basic(prot_attr=[prot_attr.to_numpy()]) # list of arrays is not allowed - - with pytest.raises(KeyError): - basic(prot_attr=prot_attr.to_numpy()) # ['c', 'b', 'a'] are not labels - -def test_series_target_basic(): - target = pd.Series([3, 4, 5], name='y2') - custom = basic(target=target) - assert (custom.y.to_numpy() == target).all() - - Y = pd.DataFrame([[3, 3], [4, 7], [5, 11]], columns=['y2', 'y']) - custom2 = basic(target=[target, 'y']) - assert (custom2.y.to_numpy() == Y).all(None) - def test_sample_weight_basic(): """Tests returning sample_weight on a toy example.""" with_weights = basic(sample_weight='X2') assert len(with_weights) == 3 assert with_weights.X.shape == (3, 2) +def test_array_args_basic(): + """Tests passing explicit arrays instead of column labels for 
+    target, and sample_weight.
+    """
+    # single array
+    pa_array = basic(prot_attr=pd.Index([1, 0, 1], name='ZZ'))
+    assert pa_array.X.columns.equals(pd.Index(['X1', 'X2', 'Z']))
+    assert pa_array.X.index.names == ['ZZ']
+    # mixed array and label
+    tar_array_mixed = basic(target=[np.array([4, 8, 12]), 'y'])
+    assert tar_array_mixed.y.shape == (3, 2)
+    assert tar_array_mixed.X.shape == (3, 3)
+    assert tar_array_mixed.y.index.equals(tar_array_mixed.X.index)
+    # sample weight
+    sw_array = basic(sample_weight=[0.5, 0.4, 2.1])
+    assert sw_array.sample_weight.index.equals(sw_array.X.index)
+
 def test_usecols_dropcols_basic():
     """Tests various combinations of usecols and dropcols on a toy example."""
-    assert basic(usecols='X1').X.columns.tolist() == ['X1']
+    assert basic(usecols=['X1']).X.columns.tolist() == ['X1']
     assert basic(usecols=['X1', 'Z']).X.columns.tolist() == ['X1', 'Z']
-    assert basic(dropcols='X1').X.columns.tolist() == ['X2', 'Z']
+    assert basic(dropcols=['X1']).X.columns.tolist() == ['X2', 'Z']
     assert basic(dropcols=['X1', 'Z']).X.columns.tolist() == ['X2']
-    assert basic(usecols='X1', dropcols=['X2']).X.columns.tolist() == ['X1']
-    assert isinstance(basic(usecols='X2', dropcols=['X1', 'X2'])[0],
+    assert basic(usecols=['X1'], dropcols=['X2']).X.columns.tolist() == ['X1']
+    assert isinstance(basic(usecols=['X2'], dropcols=['X1', 'X2'])[0],
                       pd.DataFrame)
 
 def test_dropna_basic():
@@ -83,21 +84,62 @@ def test_dropna_basic():
     basic_dropna = partial(standardize_dataset, df=df, prot_attr='Z',
                            target='y', dropna=True)
     assert basic_dropna().X.shape == (2, 3)
-    assert basic(dropcols='X1').X.shape == (3, 2)
+    assert basic(dropcols=['X1']).X.shape == (3, 2)
 
+@pytest.mark.filterwarnings('ignore', category=NumericConversionWarning)
 def test_numeric_only_basic():
     """Tests numeric_only on a toy example."""
-    assert basic(prot_attr='X2', numeric_only=True).X.shape == (3, 2)
-    assert (basic(prot_attr='X2', dropcols='Z', numeric_only=True).X.shape
-            == (3, 2))
+    num_only = basic(numeric_only=True)
+    assert num_only.X.shape == (3, 2)
+    assert 'Z' in num_only.X.index.names
+    num_only_X2 = basic(prot_attr='X2', numeric_only=True)
+    num_only_X2_dropZ = basic(prot_attr='X2', dropcols=['Z'], numeric_only=True)
+    assert num_only_X2.X.equals(num_only_X2_dropZ.X)
 
+@pytest.mark.filterwarnings('error', category=NumericConversionWarning)
+def test_numeric_only_warnings():
+    with pytest.raises(UserWarning):
+        basic(numeric_only=True)  # prot_attr has non-numeric
+    with pytest.raises(UserWarning):
+        basic(numeric_only=True, prot_attr='y', target='Z')  # y has non-numeric
+
+def test_multiindex_cols():
+    """Tests DataFrame with MultiIndex columns."""
+    cols = pd.MultiIndex.from_arrays([['X', 'X', 'y', 'Z'], [1, 2, '', '']])
+    df = pd.DataFrame([[1, 2, 3, 'a'], [5, 6, 7, 'b'], [None, 10, 11, 'c']],
+                      columns=cols)
+    multiindex = standardize_dataset(df, prot_attr='Z', target='y')
+    assert multiindex.X.index.names == ['Z']
+    assert multiindex.y.name == 'y'
+    assert multiindex.X.columns.equals(cols.drop('y', level=0))
+
+@pytest.mark.filterwarnings('ignore', category=NumericConversionWarning)
 def test_fetch_adult():
     """Tests Adult Income dataset shapes with various options."""
     adult = fetch_adult()
     assert len(adult) == 3
     assert adult.X.shape == (45222, 13)
+    assert len(adult.X.index.get_level_values('race').categories) == 2
+    assert len(adult.X.race.cat.categories) > 2
     assert fetch_adult(dropna=False).X.shape == (48842, 13)
+    # race is kept since it's binary
     assert fetch_adult(numeric_only=True).X.shape == (48842, 7)
+    num_only_bin_race = fetch_adult(numeric_only=True, binary_race=False)
+    # race gets dropped since it's categorical
+    assert num_only_bin_race.X.shape == (48842, 6)
+    # still in index though
+    assert 'race' in num_only_bin_race.X.index.names
+
+def test_adult_matches_old():
+    """Tests Adult Income dataset matches original version."""
+    X, y, _ = fetch_adult()
+    X.race = X.race.cat.set_categories(['Non-white', 'White']).fillna('Non-white')
+
+    adult = AdultDataset()
+    adult = adult.convert_to_dataframe(de_dummy_code=True)[0].drop(columns=adult.label_names)
+
+    assert_frame_equal(X.reset_index(drop=True), adult.reset_index(drop=True),
+                       check_dtype=False, check_categorical=False, check_like=True)
 
 def test_fetch_german():
     """Tests German Credit dataset shapes with various options."""
@@ -106,25 +148,130 @@ def test_fetch_german():
     assert german.X.shape == (1000, 21)
     assert fetch_german(numeric_only=True).X.shape == (1000, 9)
 
+def test_german_matches_old():
+    """Tests German Credit datasets matches original version."""
+    column_map = {
+        'checking_status': 'status',
+        'duration': 'month',
+        'savings_status': 'savings',
+        'installment_commitment': 'investment_as_income_percentage',
+        'other_parties': 'other_debtors',
+        'property_magnitude': 'property',
+        'other_payment_plans': 'installment_plans',
+        'existing_credits': 'number_of_credits',
+        'job': 'skill_level',
+        'num_dependents': 'people_liable_for',
+        'own_telephone': 'telephone',
+    }
+    X, y = fetch_german()
+    # marital status was not included before and age was binary
+    X = X.drop(columns=['marital_status', 'age']).reset_index('age')
+    # columns are named differently in the old version
+    X = X.rename(columns=column_map)
+
+    old = GermanDataset()
+    old = old.convert_to_dataframe(de_dummy_code=True)[0].drop(columns=old.label_names)
+
+    # categories in the old version were not renamed so just map both to ints
+    X = X.apply(lambda c: c.factorize()[0] if not is_numeric_dtype(c) else c)
+    old = old.apply(lambda c: c.factorize()[0] if not is_numeric_dtype(c) else c)
+
+    assert_frame_equal(X.reset_index(drop=True), old.reset_index(drop=True),
+                       check_like=True)
+
 def test_fetch_bank():
     """Tests Bank Marketing dataset shapes with various options."""
     bank = fetch_bank()
     assert len(bank) == 2
     assert bank.X.shape == (45211, 15)
-    assert fetch_bank(dropcols=[]).X.shape == (45211, 16)
+    assert fetch_bank(dropcols=None).X.shape == (45211, 16)
     assert fetch_bank(numeric_only=True).X.shape == (45211, 7)
 
-@pytest.mark.filterwarnings('error', category=ColumnAlreadyDroppedWarning)
+# TODO: bank doesn't match old
+
+@pytest.mark.filterwarnings('ignore', category=NumericConversionWarning)
 def test_fetch_compas():
     """Tests COMPAS Recidivism dataset shapes with various options."""
     compas = fetch_compas()
     assert len(compas) == 2
     assert compas.X.shape == (6167, 10)
     assert fetch_compas(binary_race=True).X.shape == (5273, 10)
-    with pytest.raises(ColumnAlreadyDroppedWarning):
-        assert fetch_compas(numeric_only=True).X.shape == (6172, 6)
+    assert fetch_compas(numeric_only=True).X.shape == (6172, 8)
+    assert fetch_compas(numeric_only=True, binary_race=True).X.shape == (5278, 9)
+
+def test_compas_matches_old():
+    """Tests COMPAS Recidivism dataset matches original version."""
+    X, y = fetch_compas()
+    X.race = X.race.cat.set_categories(['Not Caucasian', 'Caucasian']).fillna('Not Caucasian')
+
+    compas = CompasDataset()
+    compas = compas.convert_to_dataframe(de_dummy_code=True)[0].drop(columns=compas.label_names)
+
+    assert_frame_equal(X.reset_index(drop=True), compas.reset_index(drop=True),
+                       check_dtype=False, check_categorical=False, check_like=True)
+
+def test_fetch_lawschool_gpa():
+    """Tests Law School GPA dataset shapes with various options."""
+    gpa = fetch_lawschool_gpa()
+    assert len(gpa) == 2
+    assert gpa.X.shape == (22342, 3)
+    assert gpa.y.nunique() > 2  # regression
+    assert fetch_lawschool_gpa(numeric_only=True, dropna=False).X.shape == (22342, 3)
+
+def test_lawschool_matches_old():
+    """Tests Law School GPA dataset matches original version."""
+    X, y = fetch_lawschool_gpa(numeric_only=True)
+
+    law = LawSchoolGPADataset()
+    law = law.convert_to_dataframe()[0].drop(columns=law.label_names)
+
+    assert_array_equal(minmax_scale(X), law)
+
+@pytest.mark.parametrize("panel", [19, 20, 21])
+def test_cache_meps(panel):
+    """Tests if cached MEPS matches raw."""
+    meps_raw = fetch_meps(panel, cache=False, accept_terms=True)[0]
+    fetch_meps(panel, cache=True, accept_terms=True)
+    meps_cached = fetch_meps(panel, cache=True)[0]
+    assert_frame_equal(meps_raw, meps_cached, check_dtype=False)
+    assert_array_equal(meps_raw.to_numpy(), meps_cached.to_numpy())
+
+@pytest.mark.parametrize(
+    "panel, cls",
+    [(19, MEPSDataset19), (20, MEPSDataset20), (21, MEPSDataset21)])
+def test_meps_matches_old(panel, cls):
+    """Tests MEPS datasets match original versions."""
+    usecols = ['REGION', 'AGE', 'SEX', 'RACE', 'MARRY', 'FTSTU',
+               'ACTDTY', 'HONRDC', 'RTHLTH', 'MNHLTH', 'HIBPDX',
+               'CHDDX', 'ANGIDX', 'MIDX', 'OHRTDX', 'STRKDX', 'EMPHDX',
+               'CHBRON', 'CHOLDX', 'CANCERDX', 'DIABDX', 'JTPAIN',
+               'ARTHDX', 'ARTHTYPE', 'ASTHDX', 'ADHDADDX', 'PREGNT',
+               'WLKLIM', 'ACTLIM', 'SOCLIM', 'COGLIM', 'DFHEAR42',
+               'DFSEE42', 'ADSMOK42', 'PCS42', 'MCS42', 'K6SUM42',
+               'PHQ242', 'EMPST', 'POVCAT', 'INSCOV']
+    educols = ['EDUCYR', 'HIDEG']
+    meps = fetch_meps(panel, accept_terms=True, usecols=usecols + educols)
+    assert len(meps) == 3
+    meps.X.RACE = meps.X.RACE.factorize(sort=True)[0]
+    MEPS = cls()
+    assert_array_equal(pd.get_dummies(meps.X.drop(columns=educols)), MEPS.features)
+    assert_array_equal(meps.y.factorize(sort=True)[0], MEPS.labels.ravel())
+
+@pytest.mark.parametrize("panel", [19, 20, 21])
+def test_fetch_meps(panel):
+    """Tests MEPS datasets shapes with various options."""
+    meps = fetch_meps(panel, accept_terms=True, dropna=False)
+    meps_dropna = fetch_meps(panel, dropna=True)
+    assert meps_dropna.X.shape[0] < meps.X.shape[0]
+    meps_numeric = fetch_meps(panel, accept_terms=True, numeric_only=True)
+    assert meps_numeric.X.shape[1] == 5
 
 def test_onehot_transformer():
     """Tests that categorical features can be correctly one-hot encoded."""
     X, y = fetch_german()
-    assert len(pd.get_dummies(X).columns) == 63
+    ohe = make_column_transformer(
+        (OneHotEncoder(), X.dtypes == 'category'),
+        remainder='passthrough', verbose_feature_names_out=False)
+    dum = pd.get_dummies(X)
+    assert ohe.fit_transform(X).shape[1] == dum.shape[1] == 63
+    assert dum.columns.symmetric_difference(ohe.get_feature_names_out()).empty
diff --git a/tests/sklearn/test_metrics.py b/tests/sklearn/test_metrics.py
index f6133dfc..9805581d 100644
--- a/tests/sklearn/test_metrics.py
+++ b/tests/sklearn/test_metrics.py
@@ -1,3 +1,5 @@
+from functools import partial
+
 import numpy as np
 import pytest
 from numpy.testing import assert_almost_equal
@@ -125,3 +127,17 @@ def test_make_scorer(func, is_ratio):
     # The lower the better
     assert_almost_equal(-abs(actual), expected, 3)
     assert_almost_equal(-abs(actual_fliped), expected, 3)
+
+def test_explicit_prot_attr_array():
+    """Tests that metrics work with explicit prot_attr arrays."""
+    prot_attr = y.index.to_flat_index()#y.index.get_level_values('sex')
+    y_arr = y.to_numpy()
+    # ratio
+    di = partial(disparate_impact_ratio, priv_group=(1, 1), sample_weight=sample_weight)
+    assert di(y_arr, y_pred, prot_attr=prot_attr) == di(y, y_pred)
+    # difference
+    aoe = partial(average_odds_error, priv_group=(1, 1), sample_weight=sample_weight)
+    assert aoe(y_arr, y_pred, prot_attr=prot_attr) == aoe(y, y_pred)
+    # index
+    ind = partial(between_group_generalized_entropy_error, priv_group=(1, 1))
+    assert ind(y_arr, y_pred, prot_attr=prot_attr) == ind(y, y_pred)
diff --git a/tests/sklearn/test_reweighing.py b/tests/sklearn/test_reweighing.py
index 1dbfc37d..3d379ce0 100644
--- a/tests/sklearn/test_reweighing.py
+++ b/tests/sklearn/test_reweighing.py
@@ -35,11 +35,11 @@ def test_gridsearch(new_adult):
 
     # UGLY workaround for sklearn issue: https://stackoverflow.com/a/49598597
     def score_func(y_true, y_pred, sample_weight):
-        idx = y_true.index.to_flat_index()
-        return accuracy_score(y_true, y_pred, sample_weight=sample_weight[idx])
-    scoring = make_scorer(score_func, **{'sample_weight': sample_weight})
+        return accuracy_score(y_true, y_pred, sample_weight=sample_weight.iloc[y_true.index])
+    scoring = make_scorer(score_func, sample_weight=sample_weight)
     params = {'estimator__C': [1, 10], 'reweigher__prot_attr': ['sex']}
     clf = GridSearchCV(rew, params, scoring=scoring, cv=5)
-    clf.fit(X, y, sample_weight=sample_weight)
+    # need to reset index for score_func to work
+    clf.fit(X, y.reset_index(drop=True), sample_weight=sample_weight)
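Illustrative only, not part of the patch: a minimal sketch of how the fetchers exercised by the new tests might be used together, assuming the aif360.sklearn API introduced in this diff (fetch_meps plus the subset/cache arguments to fetch_compas) and scikit-learn >= 1.0. The one-hot/logistic-regression pipeline is an arbitrary choice for demonstration, mirroring the column selector in test_onehot_transformer.

from sklearn.compose import make_column_transformer
from sklearn.linear_model import LogisticRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import OneHotEncoder

from aif360.sklearn.datasets import fetch_compas, fetch_meps

# Violent-recidivism variant of COMPAS, keeping only White and Black
# defendants; the CSV is cached locally on first download.
X, y = fetch_compas(subset='violent', binary_race=True, cache=True)

# MEPS panel 19; accept_terms=True acknowledges the MEPS usage terms and
# skips the interactive prompt, as in the MEPS tests above.
meps = fetch_meps(19, accept_terms=True)
print(meps.X.shape, meps.y.shape)

# One-hot encode the categorical columns (same boolean-mask selector as
# test_onehot_transformer) and fit a classifier on the COMPAS features.
clf = make_pipeline(
    make_column_transformer(
        (OneHotEncoder(handle_unknown='ignore'), X.dtypes == 'category'),
        remainder='passthrough'),
    LogisticRegression(max_iter=1000))
clf.fit(X, y)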