diff --git a/Part 3 - Classification/Section 20 - Random Forest Classification/classification_template.py b/Part 3 - Classification/Section 20 - Random Forest Classification/classification_template.py
index 86bf97f..d1ae3ec 100644
--- a/Part 3 - Classification/Section 20 - Random Forest Classification/classification_template.py
+++ b/Part 3 - Classification/Section 20 - Random Forest Classification/classification_template.py
@@ -12,7 +12,7 @@
 
 # Splitting the dataset into the Training set and Test set
 from sklearn.cross_validation import train_test_split
-X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)
+X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)
 
 # Feature Scaling
 from sklearn.preprocessing import StandardScaler
@@ -33,21 +33,7 @@
 # Visualising the Training set results
 from matplotlib.colors import ListedColormap
 X_set, y_set = X_train, y_train
-X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
-                     np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
-plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
-             alpha = 0.75, cmap = ListedColormap(('red', 'green')))
-plt.xlim(X1.min(), X1.max())
-plt.ylim(X2.min(), X2.max())
-for i, j in enumerate(np.unique(y_set)):
-    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
-                c = ListedColormap(('red', 'green'))(i), label = j)
-plt.title('Classifier (Training set)')
-plt.xlabel('Age')
-plt.ylabel('Estimated Salary')
-plt.legend()
-plt.show()
-
+X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X
 # Visualising the Test set results
 from matplotlib.colors import ListedColormap
 X_set, y_set = X_test, y_test
@@ -64,4 +50,4 @@
 plt.xlabel('Age')
 plt.ylabel('Estimated Salary')
 plt.legend()
-plt.show()
\ No newline at end of file
+plt.show()
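
Note on the second hunk: the added line is cut off in the middle of the np.meshgrid(...) call, so the patched classification_template.py no longer contains a complete training-set visualisation and will raise a SyntaxError when run. For reference, the sketch below reproduces the template's workflow end to end, including the decision-boundary plot that the removed block produced. It is only a sketch under stated assumptions, not the file from the diff: synthetic data from make_classification stands in for Social_Network_Ads.csv, a RandomForestClassifier fills the template's "# Create your classifier here" slot (chosen to match the section title), and train_test_split is imported from sklearn.model_selection because sklearn.cross_validation was removed in scikit-learn 0.20.

# Self-contained sketch of the classification_template.py workflow.
# Assumptions (not taken from the diff): synthetic 2-D data instead of
# Social_Network_Ads.csv, and a RandomForestClassifier as the example model.
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split  # replaces the removed sklearn.cross_validation
from sklearn.preprocessing import StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import confusion_matrix

# Two informative features so the decision regions can be drawn in 2-D
X, y = make_classification(n_samples = 400, n_features = 2, n_informative = 2,
                           n_redundant = 0, random_state = 0)

# Splitting the dataset into the Training set and Test set (test_size = 0.20 as in the patch)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.20, random_state = 0)

# Feature Scaling
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

# Fitting the classifier to the Training set
classifier = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0)
classifier.fit(X_train, y_train)

# Predicting the Test set results and making the Confusion Matrix
y_pred = classifier.predict(X_test)
cm = confusion_matrix(y_test, y_pred)
print(cm)

# Visualising the Training set results (the block removed by the second hunk)
X_set, y_set = X_train, y_train
X1, X2 = np.meshgrid(np.arange(start = X_set[:, 0].min() - 1, stop = X_set[:, 0].max() + 1, step = 0.01),
                     np.arange(start = X_set[:, 1].min() - 1, stop = X_set[:, 1].max() + 1, step = 0.01))
plt.contourf(X1, X2, classifier.predict(np.array([X1.ravel(), X2.ravel()]).T).reshape(X1.shape),
             alpha = 0.75, cmap = ListedColormap(('red', 'green')))
plt.xlim(X1.min(), X1.max())
plt.ylim(X2.min(), X2.max())
for i, j in enumerate(np.unique(y_set)):
    plt.scatter(X_set[y_set == j, 0], X_set[y_set == j, 1],
                c = ListedColormap(('red', 'green'))(i), label = j)
plt.title('Classifier (Training set)')
plt.xlabel('Feature 1 (scaled)')   # 'Age' in the original dataset
plt.ylabel('Feature 2 (scaled)')   # 'Estimated Salary' in the original dataset
plt.legend()
plt.show()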