[python] refine: solve several trivial issues (#753)
* refine python codes

* fix appveyor test

* add note to feature_importances
wxchan authored and guolinke committed Aug 18, 2017
1 parent e7c5327 commit a4ab155
Showing 4 changed files with 14 additions and 10 deletions.
appveyor.yml (2 changes: 1 addition & 1 deletion)

@@ -23,7 +23,7 @@ test_script:
   - conda info -a
   - conda install --yes numpy scipy scikit-learn pandas matplotlib
   - pip install pep8 pytest
-  - pytest tests/c_api_test/test.py
+  - pytest tests/c_api_test/test_.py
   - "set /p LGB_VER=< VERSION.txt"
   - cd python-package && python setup.py sdist --formats gztar
   - cd dist
python-package/lightgbm/basic.py (12 changes: 6 additions & 6 deletions)

@@ -224,10 +224,6 @@ def c_int_array(data):
 def _data_from_pandas(data, feature_name, categorical_feature, pandas_categorical):
     if isinstance(data, DataFrame):
         if feature_name == 'auto' or feature_name is None:
-            if all([isinstance(name, integer_types + (np.integer, )) for name in data.columns]):
-                msg = """Using Pandas (default) integer column names, not column indexes. You can use indexes with DataFrame.values."""
-                warnings.filterwarnings('once')
-                warnings.warn(msg, stacklevel=5)
             data = data.rename(columns=str)
         cat_cols = data.select_dtypes(include=['category']).columns
         if pandas_categorical is None:  # train dataset
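
The warning about default integer column names is removed, but such columns are still coerced to strings before the data reaches LightGBM. A minimal, illustrative sketch of that coercion (not code from this commit):

import pandas as pd

# A DataFrame built without explicit column names gets integer labels 0, 1, ...
df = pd.DataFrame([[1.0, 2.0], [3.0, 4.0]])
renamed = df.rename(columns=str)                 # labels become the strings '0', '1'
print(list(df.columns), list(renamed.columns))   # [0, 1] ['0', '1']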
@@ -624,6 +620,8 @@ def _lazy_init(self, data, label=None, max_bin=255, reference=None,
         self.max_bin = max_bin
         self.predictor = predictor
         params["max_bin"] = max_bin
+        if "verbosity" in params:
+            params.setdefault("verbose", params.pop("verbosity"))
         if silent:
             params["verbose"] = 0
         elif "verbose" not in params:
@@ -642,8 +640,8 @@ def _lazy_init(self, data, label=None, max_bin=255, reference=None,
                 else:
                     raise TypeError("Wrong type({}) or unknown name({}) in categorical_feature"
                                     .format(type(name).__name__, name))
-
-            params['categorical_column'] = sorted(categorical_indices)
+            if categorical_indices:
+                params['categorical_column'] = sorted(categorical_indices)
 
         params_str = param_dict_to_str(params)
         """process for reference dataset"""
@@ -1222,6 +1220,8 @@ def __init__(self, params=None, train_set=None, model_file=None, silent=False):
         self.best_iteration = -1
         self.best_score = {}
         params = {} if params is None else params
+        if "verbosity" in params:
+            params.setdefault("verbose", params.pop("verbosity"))
         if silent:
             params["verbose"] = 0
         elif "verbose" not in params:
python-package/lightgbm/sklearn.py (10 changes: 7 additions & 3 deletions)

@@ -454,9 +454,13 @@ def evals_result_(self):
 
     @property
     def feature_importances_(self):
-        """Get normailized feature importances."""
-        importace_array = self.booster_.feature_importance().astype(np.float32)
-        return importace_array / importace_array.sum()
+        """
+        Get feature importances.
+        Note: feature importance in sklearn interface used to normalize to 1,
+        it's deprecated after 2.0.4 and same as Booster.feature_importance() now
+        """
+        return self.booster_.feature_importance()
 
     @LGBMDeprecated('Use attribute booster_ instead.')
     def booster(self):
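
After this change the property simply forwards Booster.feature_importance(), i.e. raw per-feature split counts rather than values normalized to sum to 1. Users who relied on the old behaviour can normalize the result themselves; a hedged sketch on a toy model (illustrative only; the synthetic data and n_estimators value are assumptions, not part of the commit):

import numpy as np
import lightgbm as lgb

X = np.random.rand(200, 4)
y = (X[:, 0] > 0.5).astype(int)                  # simple signal so trees actually split
model = lgb.LGBMClassifier(n_estimators=5).fit(X, y)

raw = model.feature_importances_                 # raw counts from Booster.feature_importance()
normalized = raw.astype(np.float32) / raw.sum()  # sums to 1, like the property did up to 2.0.4
print(raw, normalized)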
tests/c_api_test/test.py → tests/c_api_test/test_.py
File renamed without changes.
