train_ots.py
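"""Fine-tune bert-base-uncased for three-way SMS classification (ham / spam / phishing).

Reads sms_spam_phishing_dataset_v1.2.csv (expects 'text' and 'label' columns) and
saves the fine-tuned weights to bert_ots_model_1.2.pth.
Requires: chardet, pandas, scikit-learn, torch, transformers.
"""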
import chardet
import pandas as pd
import torch
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from torch.optim import AdamW  # transformers.AdamW is deprecated; use the PyTorch implementation
from torch.utils.data import DataLoader, Dataset
from transformers import BertForSequenceClassification, BertTokenizer

class TextDataset(Dataset):
    """Wraps raw texts and integer labels as BERT-ready tensors."""

    def __init__(self, texts, labels, tokenizer, max_len):
        self.texts = texts
        self.labels = labels
        self.tokenizer = tokenizer
        self.max_len = max_len

    def __len__(self):
        return len(self.texts)

    def __getitem__(self, item):
        text = str(self.texts[item])
        label = self.labels[item]
        # tokenizer(...) supersedes the deprecated encode_plus; same arguments, same output
        encoding = self.tokenizer(
            text,
            add_special_tokens=True,
            max_length=self.max_len,
            return_token_type_ids=False,
            padding='max_length',
            return_attention_mask=True,
            return_tensors='pt',
            truncation=True,
        )
        return {
            'text': text,
            'input_ids': encoding['input_ids'].flatten(),
            'attention_mask': encoding['attention_mask'].flatten(),
            'labels': torch.tensor(label, dtype=torch.long),
        }

def create_data_loader(df, tokenizer, max_len, batch_size, shuffle=False, num_workers=0):
    ds = TextDataset(
        texts=df.text.to_numpy(),
        labels=df.label.to_numpy(),
        tokenizer=tokenizer,
        max_len=max_len,
    )
    return DataLoader(ds, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)

def main():
    # Detect the CSV's text encoding before reading it
    with open('sms_spam_phishing_dataset_v1.2.csv', 'rb') as f:
        result = chardet.detect(f.read())
    file_encoding = result['encoding']
    print("Detected encoding:", file_encoding)

    # Load the dataset and map class names to integer ids
    df = pd.read_csv('sms_spam_phishing_dataset_v1.2.csv', encoding=file_encoding)
    df['label'] = df['label'].map({'ham': 0, 'spam': 1, 'phishing': 2})

    # Hyperparameters
    BATCH_SIZE = 16
    MAX_LEN = 128
    EPOCHS = 3

    # Train/test split; fixed seed for reproducibility
    train_df, test_df = train_test_split(df, test_size=0.1, random_state=42)

    tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')

    # Data loaders; shuffle the training set each epoch
    train_data_loader = create_data_loader(train_df, tokenizer, MAX_LEN, BATCH_SIZE, shuffle=True)
    test_data_loader = create_data_loader(test_df, tokenizer, MAX_LEN, BATCH_SIZE)
    # Load pretrained BERT with a 3-class classification head
    model = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=3)

    # Pick the best available device: Apple MPS, then CUDA, then CPU
    if torch.backends.mps.is_available():
        device = torch.device("mps")
    elif torch.cuda.is_available():
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")
    print("Device:", device)
    model.to(device)

    # Optimizer
    optimizer = AdamW(model.parameters(), lr=2e-5)
    # Training loop
    for epoch in range(EPOCHS):
        model.train()
        for batch in train_data_loader:
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            labels = batch['labels'].to(device)

            optimizer.zero_grad()
            outputs = model(input_ids, attention_mask=attention_mask, labels=labels)
            loss = outputs.loss
            loss.backward()
            optimizer.step()
        print(f"Epoch {epoch + 1}/{EPOCHS} completed.")
    # Evaluate on the held-out test set
    model.eval()
    predictions, true_labels = [], []
    for batch in test_data_loader:
        input_ids = batch['input_ids'].to(device)
        attention_mask = batch['attention_mask'].to(device)
        labels = batch['labels'].to(device)
        with torch.no_grad():
            outputs = model(input_ids, attention_mask=attention_mask)
        logits = outputs.logits
        predictions.extend(torch.argmax(logits, dim=1).tolist())
        true_labels.extend(labels.tolist())

    accuracy = accuracy_score(true_labels, predictions)
    print(f"Test Accuracy: {accuracy * 100:.2f}%")

    # Save the fine-tuned weights
    torch.save(model.state_dict(), 'bert_ots_model_1.2.pth')


if __name__ == '__main__':
    main()
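
# Minimal inference sketch, assuming the checkpoint saved above and the same
# tokenizer settings; the sample message and label order are taken from the
# mapping in main(), the message text itself is hypothetical:
#
#   model = BertForSequenceClassification.from_pretrained('bert-base-uncased', num_labels=3)
#   model.load_state_dict(torch.load('bert_ots_model_1.2.pth', map_location='cpu'))
#   model.eval()
#   tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
#   enc = tokenizer("Your account is locked, verify at http://example.com",
#                   max_length=128, padding='max_length', truncation=True,
#                   return_tensors='pt')
#   with torch.no_grad():
#       logits = model(enc['input_ids'], attention_mask=enc['attention_mask']).logits
#   print(['ham', 'spam', 'phishing'][logits.argmax(dim=1).item()])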