|
1 | 1 | import pytest |
2 | 2 | import ray |
3 | 3 | import pandas as pd |
| 4 | +import numpy as np |
| 5 | +from sklearn.compose import ColumnTransformer |
4 | 6 | from sklearn.model_selection import train_test_split |
5 | 7 | from sklearn.pipeline import Pipeline |
6 | | -from sklearn.impute import SimpleImputer |
7 | | -from sklearn.preprocessing import StandardScaler, OneHotEncoder |
8 | | -from hercules.Datamodel import Xy |
9 | | -from hercules.Datamodel import XYRef |
10 | | -import hercules.Datamodel as dm |
11 | | -import hercules.RuntimeNew as rt |
12 | | -from hercules.RuntimeNew import ExecutionType |
| 8 | +from sklearn.preprocessing import StandardScaler |
| 9 | +from sklearn.tree import DecisionTreeClassifier |
| 10 | +import codeflare.pipelines.Datamodel as dm |
| 11 | +import codeflare.pipelines.Runtime as rt |
| 12 | +from codeflare.pipelines.Datamodel import Xy |
| 13 | +from codeflare.pipelines.Datamodel import XYRef |
| 14 | +from codeflare.pipelines.Runtime import ExecutionType |
13 | 15 |
|
def test_or():
    """Fit a two-branch codeflare pipeline (scaler -> two decision trees) on Ray.

    Builds a synthetic integer dataset, runs ExecutionType.FIT through a
    pipeline whose preprocessor node fans out to two DecisionTreeClassifier
    nodes, and asserts that the fitted estimator for one branch is retrievable
    and has one importance value per input feature.
    """
    # Defensive shutdown in case a previous test left a Ray runtime alive.
    ray.shutdown()
    ray.init()

    try:
        ## prepare the data — seeded RNG so the fixture is reproducible
        rng = np.random.default_rng(42)
        X = pd.DataFrame(rng.integers(0, 100, size=(10000, 4)), columns=list('ABCD'))
        y = pd.DataFrame(rng.integers(0, 2, size=(10000, 1)), columns=['Label'])

        numeric_features = X.select_dtypes(include=['int64']).columns
        numeric_transformer = Pipeline(steps=[
            ('scaler', StandardScaler())])

        ## set up preprocessor as StandardScaler over the numeric columns
        preprocessor = ColumnTransformer(
            transformers=[
                ('num', numeric_transformer, numeric_features),
            ])

        X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)

        # Put the training split into the Ray object store once; the pipeline
        # nodes consume object refs, not in-process data.
        X_ref = ray.put(X_train)
        y_ref = ray.put(y_train)

        Xy_ref = XYRef(X_ref, y_ref)
        Xy_ref_ptr = ray.put(Xy_ref)
        Xy_ref_ptrs = [Xy_ref_ptr]

        ## two decision tree classifiers with different depth limits
        c_a = DecisionTreeClassifier(max_depth=3)
        c_b = DecisionTreeClassifier(max_depth=5)

        ## initialize codeflare pipeline by first creating the nodes
        pipeline = dm.Pipeline()
        node_a = dm.EstimatorNode('preprocess', preprocessor)
        node_b = dm.EstimatorNode('c_a', c_a)
        node_c = dm.EstimatorNode('c_b', c_b)

        ## nodes are connected by edges: preprocess fans out to both trees
        pipeline.add_edge(node_a, node_b)
        pipeline.add_edge(node_a, node_c)

        in_args = {node_a: Xy_ref_ptrs}
        ## execute the codeflare pipeline in FIT mode
        out_args = rt.execute_pipeline(pipeline, ExecutionType.FIT, in_args)

        # Both terminal nodes must produce output.
        assert node_b in out_args
        assert node_c in out_args

        ## retrieve the fitted estimator for branch b and check it
        node_b_out_args = ray.get(out_args[node_b])
        assert len(node_b_out_args) == 1
        b_out_xyref = node_b_out_args[0]
        # Materialize the transformed X to make sure the ref resolves.
        ray.get(b_out_xyref.get_Xref())
        b_out_node = ray.get(b_out_xyref.get_currnoderef())
        sct_b = b_out_node.get_estimator()
        # A fitted tree exposes one importance per input feature (4 columns).
        assert len(sct_b.feature_importances_) == X.shape[1]
    finally:
        # Always release the Ray runtime, even if the pipeline run fails.
        ray.shutdown()
49 | 71 |
|
50 | 72 |
|
if __name__ == "__main__":
    # `sys` was never imported at module level in this file; import it here
    # so running the file as a script does not raise NameError.
    import sys
    sys.exit(pytest.main(["-v", __file__]))
53 | | - |
54 | | - |
|
0 commit comments