-
Notifications
You must be signed in to change notification settings - Fork 89
/
feature_selection.py
217 lines (189 loc) · 8.06 KB
/
feature_selection.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
"""Implements feature selection algorithms."""
__author__ = ["aiwalter"]
__all__ = ["FeatureSelection"]
import math
import pandas as pd
from aeon.transformations.base import BaseTransformer
from aeon.utils.validation.forecasting import check_regressor
class FeatureSelection(BaseTransformer):
    """
    Select exogenous features.

    Transformer to enable tuneable feature selection of exogenous data. The
    FeatureSelection implements multiple methods to select features (columns).
    In case X is a pd.Series, then it is just passed through, unless method="none",
    then None is returned in transform().

    Parameters
    ----------
    method : str
        The method of how to select the features. Implemented methods are:
        * "feature-importances": Use feature_importances_ of the regressor (meta-model)
          to select n_columns with highest importance values.
          Requires parameter n_columns.
        * "random": Randomly select n_columns features. Requires parameter n_columns.
        * "columns": Select features by given names.
        * "none": Remove all columns by setting Z to None.
        * "all": Select all given features.
    n_columns : int, default = None
        Number of features (columns) to select. n_columns must be <=
        number of X columns. Some methods require n_columns to be given.
    regressor : sklearn-like regressor, default=None
        Used as meta-model for the method "feature-importances". The given
        regressor must have an attribute "feature_importances_". If None,
        then a GradientBoostingRegressor(max_depth=5) is used.
    random_state : int, RandomState instance or None, default=None
        Used to set random_state of the default regressor and to
        set random.seed() if method="random".
    columns : list of str
        A list of columns to select, required if method="columns".

    Attributes
    ----------
    columns_ : list of str
        List of columns that have been selected as features.
    regressor_ : sklearn-like regressor
        Fitted regressor (meta-model).
    n_columns_ : int
        Derived from number of features if n_columns is None, then
        n_columns_ is calculated as int(math.ceil(Z.shape[1] / 2)). So taking
        half of given features only as default.
    feature_importances_ : dict or None
        A dictionary with column name as key and feature importance value as value.
        The dict is sorted descending on value. This attribute is a dict if
        method="feature-importances", else None.

    Examples
    --------
    >>> from aeon.transformations.feature_selection import FeatureSelection
    >>> from aeon.datasets import load_longley
    >>> y, X = load_longley()
    >>> transformer = FeatureSelection(method="feature-importances", n_columns=3)
    >>> X_hat = transformer.fit_transform(X, y)
    """

    _tags = {
        "input_data_type": "Series",
        # what is the scitype of X: Series, or Panel
        "output_data_type": "Series",
        # what scitype is returned: Primitives, Series, Panel
        "instancewise": True,  # is this an instance-wise transform?
        "X_inner_type": ["pd.DataFrame", "pd.Series"],
        # which mtypes do _fit/_predict support for X?
        "y_inner_type": "pd.DataFrame",  # which mtypes do _fit/_predict support for y?
        "fit_is_empty": False,
        "transform-returns-same-time-index": True,
        "skip-inverse-transform": True,
        "univariate-only": False,
    }

    def __init__(
        self,
        method="feature-importances",
        n_columns=None,
        regressor=None,
        random_state=None,
        columns=None,
    ):
        # Parameters are stored unmodified; validation and derivation of
        # fitted attributes (suffixed with "_") happens in _fit.
        self.n_columns = n_columns
        self.method = method
        self.regressor = regressor
        self.random_state = random_state
        self.columns = columns
        super().__init__()

    def _fit(self, X, y=None):
        """Fit transformer to X and y.

        private _fit containing the core logic, called from fit

        Parameters
        ----------
        X : pd.Series or pd.DataFrame
            Data to fit transform to
        y : pd.DataFrame, default=None
            Additional data, e.g., labels for transformation

        Returns
        -------
        self: a fitted instance of the estimator
        """
        self.n_columns_ = self.n_columns
        self.feature_importances_ = None
        if self.method == "none":
            # transform() will return None, which is of scitype Primitives
            self.set_tags(**{"output_data_type": "Primitives"})
        # multivariate X; a univariate pd.Series has no columns to select from,
        # so feature selection is only performed for DataFrame input
        if not isinstance(X, pd.Series):
            if self.method == "feature-importances":
                self.regressor_ = check_regressor(
                    regressor=self.regressor, random_state=self.random_state
                )
                self._check_n_columns(X)
                # fit regressor with X as exog data and y as endog data (target)
                self.regressor_.fit(X=X, y=y)
                if not hasattr(self.regressor_, "feature_importances_"):
                    raise ValueError(
                        """The given regressor must have an
                        attribute feature_importances_ after fitting."""
                    )
                # create dict with column name (key) and feature importance (value)
                d = dict(zip(X.columns, self.regressor_.feature_importances_))
                # sort d descending by importance value
                d = {k: d[k] for k in sorted(d, key=d.get, reverse=True)}
                self.feature_importances_ = d
                # keep the n_columns_ most important columns
                self.columns_ = list(d.keys())[: self.n_columns_]
            elif self.method == "random":
                self._check_n_columns(X)
                # sample n_columns_ columns (axis=1) without replacement
                self.columns_ = list(
                    X.sample(
                        n=self.n_columns_, random_state=self.random_state, axis=1
                    ).columns
                )
            elif self.method == "columns":
                if self.columns is None:
                    raise AttributeError("Parameter columns must be given.")
                self.columns_ = self.columns
            elif self.method == "none":
                self.columns_ = None
            elif self.method == "all":
                self.columns_ = list(X.columns)
            else:
                raise ValueError("Incorrect method given. Try another method.")
        return self

    def _transform(self, X, y=None):
        """Transform X and return a transformed version.

        private _transform containing the core logic, called from transform

        Parameters
        ----------
        X : pd.Series or pd.DataFrame
            Data to be transformed
        y : ignored argument for interface compatibility
            Additional data, e.g., labels for transformation

        Returns
        -------
        Xt : pd.Series or pd.DataFrame, same type as X
            transformed version of X
        """
        # multivariate case
        if not isinstance(X, pd.Series):
            if self.method == "none":
                Xt = None
            else:
                # select the columns determined in _fit
                Xt = X[self.columns_]
        # univariate case: nothing to select, pass through (or drop entirely)
        else:
            if self.method == "none":
                Xt = None
            else:
                Xt = X
        return Xt

    def _check_n_columns(self, Z):
        # default: if n_columns was not given as an int, take half (rounded up)
        # of the available columns
        if not isinstance(self.n_columns_, int):
            self.n_columns_ = int(math.ceil(Z.shape[1] / 2))

    @classmethod
    def get_test_params(cls, parameter_set="default"):
        """Return testing parameter settings for the estimator.

        Parameters
        ----------
        parameter_set : str, default="default"
            Name of the set of test parameters to return, for use in tests. If no
            special parameters are defined for a value, will return `"default"` set.

        Returns
        -------
        params : dict or list of dict, default = {}
            Parameters to create testing instances of the class
            Each dict are parameters to construct an "interesting" test instance, i.e.,
            `MyClass(**params)` or `MyClass(**params[i])` creates a valid test instance.
            `create_test_instance` uses the first (or only) dictionary in `params`
        """
        return {"method": "all"}