import torch
import torch.nn as nn
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.preprocessing import LabelEncoder
from torch.utils.data import DataLoader, TensorDataset
# Load the competition training data and drop the identifier columns
data = pd.read_csv('/kaggle/input/playground-series-s4e1/train.csv')
data = data.drop(["id"], axis=1)
data = data.drop(["CustomerId"], axis=1)
data = data.drop(["Surname"], axis=1)
# Encode the categorical columns (Geography and Gender) as integers
data.iloc[:, 1] = LabelEncoder().fit_transform(data.iloc[:, 1])
data.iloc[:, 2] = LabelEncoder().fit_transform(data.iloc[:, 2])
from sklearn.model_selection import train_test_split
# Coerce every remaining column to numeric; unparseable values become NaN
for col in data.columns:
    data[col] = pd.to_numeric(data[col], errors='coerce')
data = data.fillna(0)
# Split features (all columns but the last) and target (Exited) into train/test sets
X = torch.tensor(data.iloc[:, :-1].values, dtype=torch.float32)
y = torch.tensor(data.iloc[:, -1].values, dtype=torch.float32)
x_train, x_test, y_train, y_test = train_test_split(X, y, test_size=0.3)
# Wrap the training tensors in a DataLoader for mini-batch training
batch_size = 32
dataset = TensorDataset(x_train, y_train)
train_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
class DNN(nn.Module):
    def __init__(self):
        super(DNN, self).__init__()
        self.net = nn.Sequential(
            nn.Linear(10, 32),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(32, 16),
            nn.ReLU(),
            nn.Dropout(0.5),
            nn.Linear(16, 8),
            nn.ReLU(),
            nn.Linear(8, 4),
            nn.ReLU(),
            nn.Linear(4, 1),
            nn.Sigmoid()
        )

    def forward(self, x):
        return self.net(x)
model = DNN()
loss_fn = nn.BCELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.001)
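A quick sanity check of the architecture before training: pushing a dummy batch with the expected 10 features through the untrained model should yield one sigmoid probability per row. This is only an illustrative sketch; the dummy batch below is made up and is not part of the dataset.

# Sanity-check sketch (assumed dummy input, not real data)
with torch.no_grad():
    dummy_batch = torch.randn(4, 10)             # 4 fake rows, 10 features each
    out = model(dummy_batch)
    print(out.shape)                             # expected: torch.Size([4, 1])
    print(out.min().item(), out.max().item())    # sigmoid keeps outputs in [0, 1]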
# Training loop: track average loss and training accuracy per epoch
epochs = 20
losses = []

for epoch in range(epochs):
    print('now is epoch:', epoch)
    epoch_loss = 0
    correct_predictions = 0
    total_predictions = 0

    for batch_x, batch_y in train_loader:
        optimizer.zero_grad()
        Pred = model(batch_x)

        # Threshold the sigmoid output at 0.5; flatten both sides so the
        # comparison is element-wise rather than broadcast to a matrix
        Pred_binary = (Pred > 0.5).float()
        correct_predictions += (Pred_binary.view(-1) == batch_y.view(-1)).sum().item()
        total_predictions += batch_y.size(0)

        loss = loss_fn(Pred.view(-1), batch_y)
        epoch_loss += loss.item()
        loss.backward()
        optimizer.step()

    accuracy = correct_predictions / total_predictions * 100
    losses.append(epoch_loss / len(train_loader))
    print(f'Loss: {epoch_loss / len(train_loader):.4f}, Accuracy: {accuracy:.2f}%')
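The loop above only reports accuracy on the training batches, and the collected losses and the matplotlib import are never used. A minimal sketch of plotting the loss curve and evaluating on the held-out split created earlier (x_test, y_test) could look like this, assuming the same 0.5 decision threshold:

# Sketch: plot the per-epoch training loss collected above
plt.plot(range(1, epochs + 1), losses)
plt.xlabel('Epoch')
plt.ylabel('Average training loss')
plt.show()

# Sketch: evaluate on the held-out split (dropout disabled via eval mode)
model.eval()
with torch.no_grad():
    test_pred = model(x_test).view(-1)
    test_loss = loss_fn(test_pred, y_test).item()
    test_acc = ((test_pred > 0.5).float() == y_test).float().mean().item() * 100
print(f'Test loss: {test_loss:.4f}, Test accuracy: {test_acc:.2f}%')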