[DOC] added usage examples to multiple estimator docstrings #6187

Merged
merged 17 commits on Apr 26, 2024
Changes from 6 commits
10 changes: 10 additions & 0 deletions sktime/classification/deep_learning/inceptiontime.py
@@ -41,6 +41,16 @@ class InceptionTimeClassifier(BaseDeepClassifier):

Adapted from the implementation by Fawaz et al.
https://github.com/hfawaz/InceptionTime/blob/master/classifiers/inception.py

Examples
--------
>>> from sktime.classification.deep_learning.inceptiontime import InceptionTimeClassifier # doctest: +SKIP # noqa
Collaborator

What is this noqa for? Is it for line length? Please fix by splitting into multiple lines instead of adding noqa.

Also, I think the doctest need not be skipped on this line, but it's okay to keep the skip as well.
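
For reference, one way the import could be split so that neither the noqa nor the skip is needed (a sketch, not necessarily the exact lines the PR ends up with):

>>> from sktime.classification.deep_learning.inceptiontime import (
...     InceptionTimeClassifier,
... )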

>>> from sktime.datasets import load_unit_test
>>> X_train, y_train = load_unit_test(split="train", return_X_y=True)
>>> X_test, y_test = load_unit_test(split="test", return_X_y=True)
>>> itc = InceptionTimeClassifier(n_epochs=20, batch_size=4) # doctest: +SKIP
>>> itc.fit(X_train, y_train) # doctest: +SKIP
InceptionTimeClassifier(...)
"""

_tags = {
10 changes: 10 additions & 0 deletions sktime/classification/deep_learning/lstmfcn.py
@@ -52,6 +52,16 @@ class LSTMFCNClassifier(BaseDeepClassifier):
----------
.. [1] Karim et al. Multivariate LSTM-FCNs for Time Series Classification, 2019
https://arxiv.org/pdf/1801.04503.pdf

Examples
--------
>>> from sktime.classification.deep_learning.lstmfcn import LSTMFCNClassifier
>>> from sktime.datasets import load_unit_test
>>> X_train, y_train = load_unit_test(split="train", return_X_y=True)
>>> X_test, y_test = load_unit_test(split="test", return_X_y=True)
>>> lstmfcn = LSTMFCNClassifier(n_epochs=20, batch_size=4) # doctest: +SKIP
>>> lstmfcn.fit(X_train, y_train) # doctest: +SKIP
LSTMFCNClassifier(...)
"""

_tags = {
16 changes: 16 additions & 0 deletions sktime/classification/distance_based/_shape_dtw.py
@@ -108,6 +108,22 @@ class ShapeDTW(BaseClassifier):
.. [1] Jiaping Zhao and Laurent Itti, "shapeDTW: Shape Dynamic Time Warping",
Pattern Recognition, 74, pp 171-184, 2018
http://www.sciencedirect.com/science/article/pii/S0031320317303710,

Examples
--------
>>> from sktime.classification.distance_based import ShapeDTW
>>> from sktime.datasets import load_unit_test # doctest: +SKIP
>>> X_train, y_train = load_unit_test(split="train") # doctest: +SKIP
>>> X_test, y_test = load_unit_test(split="test") # doctest: +SKIP
>>> clf = ShapeDTW(n_neighbors=1,
... subsequence_length=30,
... shape_descriptor_function="raw",
... shape_descriptor_functions=None,
... metric_params=None
... ) # doctest: +SKIP
Collaborator

It'll be good to add indentation after the `...` continuation markers, otherwise it looks bad in the hosted docs (and is inconsistent with other examples).
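
A sketch of the indented layout being suggested here, using the same parameters as the example above (only the formatting changes):

>>> clf = ShapeDTW(
...     n_neighbors=1,
...     subsequence_length=30,
...     shape_descriptor_function="raw",
...     shape_descriptor_functions=None,
...     metric_params=None,
... )  # doctest: +SKIP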

>>> clf.fit(X_train, y_train) # doctest: +SKIP
ShapeDTW(...)
>>> y_pred = clf.predict(X_test) # doctest: +SKIP
"""

_tags = {
@@ -71,6 +71,25 @@ class KNeighborsTimeSeriesClassifierPyts(_PytsAdapter, BaseClassifier):
----------
classes_ : array, shape = (n_classes,)
An array of class labels known to the classifier.

Examples
--------
>>> from sktime.classification.distance_based import KNeighborsTimeSeriesClassifierPyts # doctest: +SKIP # noqa
>>> from sktime.datasets import load_unit_test # doctest: +SKIP
>>> X_train, y_train = load_unit_test(split="train") # doctest: +SKIP
>>> X_test, y_test = load_unit_test(split="test") # doctest: +SKIP
>>> clf = KNeighborsTimeSeriesClassifierPyts(n_neighbors=1,
... weights="uniform",
... algorithm="auto",
... leaf_size=30,
... p=2,
... metric="minkowski",
... metric_params=None,
... n_jobs=1
... ) # doctest: +SKIP
>>> clf.fit(X_train, y_train) # doctest: +SKIP
KNeighborsTimeSeriesClassifierPyts(...)
>>> y_pred = clf.predict(X_test) # doctest: +SKIP
"""

_tags = {
@@ -55,6 +55,24 @@ class KNeighborsTimeSeriesClassifierTslearn(_TslearnAdapter, BaseClassifier):
Above 50, the output is sent to stdout.
The frequency of the messages increases with the verbosity level.
If it is more than 10, all iterations are reported.

Examples
--------
>>> from sktime.classification.distance_based import KNeighborsTimeSeriesClassifierTslearn # doctest: +SKIP # noqa
>>> from sktime.datasets import load_unit_test # doctest: +SKIP
>>> X_train, y_train = load_unit_test(split="train") # doctest: +SKIP
>>> X_test, y_test = load_unit_test(split="test") # doctest: +SKIP
>>> clf = KNeighborsTimeSeriesClassifierTslearn(
... n_neighbors=5,
... weights="uniform",
... metric="dtw",
... metric_params=None,
... n_jobs=None,
... verbose=0,
... ) # doctest: +SKIP
>>> clf.fit(X_train, y_train) # doctest: +SKIP
KNeighborsTimeSeriesClassifierTslearn(...)
>>> y_pred = clf.predict(X_test) # doctest: +SKIP
"""

_tags = {
17 changes: 16 additions & 1 deletion sktime/classification/ensemble/_ctsf.py
@@ -171,6 +171,21 @@ class labels (multi-output problem).
----------
.. [1] Deng et al., A time series forest for classification and feature extraction,
Information Sciences, 239:2013.

Examples
--------
>>> from sktime.classification.ensemble import ComposableTimeSeriesForestClassifier
>>> from sktime.classification.kernel_based import RocketClassifier
>>> from sktime.datasets import load_unit_test
>>> X_train, y_train = load_unit_test(split="train") # doctest: +SKIP
>>> X_test, y_test = load_unit_test(split="test") # doctest: +SKIP
>>> clf = ComposableTimeSeriesForestClassifier(
... RocketClassifier(num_kernels=100),
... n_estimators=10,
... ) # doctest: +SKIP
>>> clf.fit(X_train, y_train) # doctest: +SKIP
ComposableTimeSeriesForestClassifier(...)
>>> y_pred = clf.predict(X_test) # doctest: +SKIP
"""

_tags = {
@@ -188,7 +203,7 @@ class labels (multi-output problem).

def __init__(
self,
- estimator=None,
+ estimator,
Collaborator

I am not sure this is acceptable in an example-addition PR. It is also potentially a breaking change, since it turns `estimator` from an optional keyword argument into a required positional argument. In fact, this is what causes the test failure in CI.

FYI @fkiraly
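
For context, a minimal sketch of the reverted signature this comment argues for, keeping `estimator` as an optional keyword argument (parameter list truncated to those visible in this hunk; only the `estimator` line differs from the diff above):

def __init__(
    self,
    estimator=None,  # keep the default so estimator remains an optional keyword argument
    n_estimators=100,
    max_depth=None,
    min_samples_split=2,
    # remaining parameters unchanged
):
    ...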

n_estimators=100,
max_depth=None,
min_samples_split=2,
15 changes: 15 additions & 0 deletions sktime/classification/feature_based/_fresh_prince.py
@@ -57,6 +57,21 @@ class FreshPRINCE(BaseClassifier):
scalable hypothesis tests (tsfresh–a python package)." Neurocomputing 307
(2018): 72-77.
https://www.sciencedirect.com/science/article/pii/S0925231218304843

Examples
--------
>>> from sktime.classification.feature_based import FreshPRINCE
>>> from sktime.datasets import load_unit_test
>>> X_train, y_train = load_unit_test(split="train", return_X_y=True)
>>> X_test, y_test = load_unit_test(split="test", return_X_y=True) # doctest: +SKIP
>>> clf = FreshPRINCE(
... default_fc_parameters="comprehensive",
... n_estimators=200,
... save_transformed_data=False,
... verbose=0,
... n_jobs=1,
... ) # doctest: +SKIP
>>> clf.fit(X_train, y_train) # doctest: +SKIP
FreshPRINCE(...)
>>> y_pred = clf.predict(X_test) # doctest: +SKIP
"""

_tags = {
@@ -49,6 +49,18 @@ class RandomIntervalClassifier(BaseClassifier):
See Also
--------
RandomIntervals

Examples
--------
>>> from sktime.classification.feature_based import RandomIntervalClassifier
>>> from sklearn.ensemble import RandomForestClassifier
>>> from sktime.datasets import load_unit_test
>>> X_train, y_train = load_unit_test(split="train", return_X_y=True)
>>> X_test, y_test = load_unit_test(split="test", return_X_y=True) # doctest: +SKIP
>>> clf = RandomIntervalClassifier(
... estimator=RandomForestClassifier(n_estimators=5)
... ) # doctest: +SKIP
>>> clf.fit(X_train, y_train) # doctest: +SKIP
RandomIntervalClassifier(...)
>>> y_pred = clf.predict(X_test) # doctest: +SKIP
"""

_tags = {
13 changes: 13 additions & 0 deletions sktime/classification/feature_based/_tsfresh_classifier.py
@@ -62,6 +62,19 @@ class TSFreshClassifier(BaseClassifier):
scalable hypothesis tests (tsfresh–a python package)." Neurocomputing 307
(2018): 72-77.
https://www.sciencedirect.com/science/article/pii/S0925231218304843

Examples
--------
>>> from sktime.classification.feature_based import TSFreshClassifier
>>> from sklearn.ensemble import RandomForestClassifier
>>> from sktime.datasets import load_unit_test
>>> X_train, y_train = load_unit_test(split="train", return_X_y=True)
>>> X_test, y_test = load_unit_test(split="test", return_X_y=True) # doctest: +SKIP
>>> clf = TSFreshClassifier(
... estimator=RandomForestClassifier(n_estimators=5),
... default_fc_parameters="efficient",
... ) # doctest: +SKIP
>>> clf.fit(X_train, y_train) # doctest: +SKIP
TSFreshClassifier(...)
>>> y_pred = clf.predict(X_test) # doctest: +SKIP
"""

_tags = {