import torch
import torch.nn as nn
import numpy as np
from sklearn import datasets
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split

# 2025/09/16
# 0) prepare data
# Binary classification dataset: 569 samples, 30 numeric features,
# target is 0/1 (malignant/benign).
bc = datasets.load_breast_cancer()
X, y = bc.data, bc.target

n_samples, n_features = X.shape

# Fixed random_state keeps the train/test split reproducible.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=1234
)

# scale: fit the scaler on the training set only, then apply the same
# transform to the test set to avoid data leakage.
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)

# Cast to float32 tensors (torch.nn layers default to float32).
X_train = torch.from_numpy(X_train.astype(np.float32))
X_test = torch.from_numpy(X_test.astype(np.float32))
y_train = torch.from_numpy(y_train.astype(np.float32))
y_test = torch.from_numpy(y_test.astype(np.float32))

# Reshape targets from (n,) to (n, 1) to match the model's output shape.
y_train = y_train.view(y_train.shape[0], 1)
y_test = y_test.view(y_test.shape[0], 1)


# 1) model
# f = wx + b, sigmoid at the end
class LogisticRegression(nn.Module):
    """Logistic regression: a single linear layer followed by a sigmoid.

    Outputs a probability in (0, 1) for the positive class, suitable
    for use with nn.BCELoss.
    """

    def __init__(self, n_input_features):
        super().__init__()  # modern zero-arg super() (Python 3)
        self.linear = nn.Linear(n_input_features, 1)

    def forward(self, x):
        # x: (batch, n_input_features) -> probabilities of shape (batch, 1)
        y_predicted = torch.sigmoid(self.linear(x))
        return y_predicted


model = LogisticRegression(n_features)

# 2) Loss and optimizer
# BCELoss expects probabilities (hence the sigmoid in forward);
# plain SGD on all model parameters.
learning_rate = 0.01
criterion = nn.BCELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
# 3) training loop
num_epochs = 100
for epoch in range(num_epochs):
    # forward pass and loss: full-batch training on the train split
    y_predicted = model(X_train)
    loss = criterion(y_predicted, y_train)

    # backward pass: accumulate gradients for all parameters
    loss.backward()

    # updates: one SGD step using the accumulated gradients
    optimizer.step()

    # zero gradients so the next backward() starts fresh
    # (gradients accumulate by default in PyTorch)
    optimizer.zero_grad()

    if (epoch + 1) % 10 == 0:
        print(f'epoch: {epoch+1}, loss = {loss.item():.4f}')

# Evaluation: no_grad() disables autograd bookkeeping since we only
# need forward passes here.
with torch.no_grad():
    y_predicted = model(X_test)
    # Threshold the probabilities at 0.5 to get hard 0/1 predictions.
    y_predicted_cls = y_predicted.round()
    # Fraction of test samples whose predicted class matches the label.
    acc = y_predicted_cls.eq(y_test).sum() / float(y_test.shape[0])
    print(f'accuracy ={acc:.4f}')