tekkonetes committed
Commit a9d0655 · Parent: 07966ca

Create main.py

Files changed (1): main.py +62 -0
main.py ADDED
@@ -0,0 +1,62 @@
# Import libraries
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel
from torch.utils.data import Dataset, DataLoader

# Define dataset and functions
class TextDataset(Dataset):
    def __init__(self, file_path, block_size):
        self.block_size = block_size
        with open(file_path, 'r', encoding='utf-8') as f:
            # One training example per non-empty line
            self.examples = [line for line in f.read().splitlines() if (len(line) > 0 and not line.isspace())]

        self.tokenizer = GPT2Tokenizer.from_pretrained('gpt2-medium')
        # GPT-2 has no padding token by default, so register one
        self.special_tokens_dict = {'pad_token': '<PAD>'}
        self.num_added_toks = self.tokenizer.add_special_tokens(self.special_tokens_dict)

    def __len__(self):
        return len(self.examples)

    def __getitem__(self, idx):
        text = self.examples[idx]
        tokenized_text = self.tokenizer.encode(text)
        # Truncate to block_size, then pad every example to a fixed length
        if len(tokenized_text) > self.block_size:
            tokenized_text = tokenized_text[:self.block_size]
        tokenized_text += [self.tokenizer.pad_token_id] * (self.block_size - len(tokenized_text))
        return torch.tensor(tokenized_text)

# Define training
def train():
    train_dataset = TextDataset('path/to/your/text/file.txt', block_size=512)
    train_loader = DataLoader(train_dataset, batch_size=8, shuffle=True)

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = GPT2LMHeadModel.from_pretrained('gpt2-medium')
    # Resize the embedding matrix to account for the added <PAD> token
    model.resize_token_embeddings(len(train_dataset.tokenizer))

    model.to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=1e-5)
    # Ignore padded positions when computing the loss
    criterion = torch.nn.CrossEntropyLoss(ignore_index=train_dataset.tokenizer.pad_token_id)

    epochs = 5

    for epoch in range(epochs):
        model.train()
        total_loss = 0
        for batch in train_loader:
            batch = batch.to(device)
            optimizer.zero_grad()
            # Next-token prediction: inputs and targets are shifted by one position
            inputs, targets = batch[:, :-1], batch[:, 1:]
            # Mask padding so the model does not attend to it
            attention_mask = (inputs != train_dataset.tokenizer.pad_token_id).long()
            # Compute the loss manually (rather than passing labels=) so that
            # ignore_index applies to the pad token and the shift is not doubled;
            # reshape, not view, because the slices are non-contiguous
            outputs = model(input_ids=inputs, attention_mask=attention_mask)
            loss = criterion(outputs.logits.reshape(-1, outputs.logits.size(-1)), targets.reshape(-1))
            loss.backward()
            optimizer.step()
            total_loss += loss.item()

        print(f'Epoch {epoch+1}, Loss: {total_loss/len(train_loader):.4f}')

    # Save both the fine-tuned weights and the tokenizer (with its added <PAD> token)
    model.save_pretrained('finetuned_model')
    train_dataset.tokenizer.save_pretrained('finetuned_model')

# TRAIN THE MODEL!!!
if __name__ == '__main__':
    train()
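
For reference, a minimal sketch of loading the result for generation. It assumes the script above has run to completion and written both the weights and the tokenizer to finetuned_model/; the prompt string is purely illustrative.

# Load the fine-tuned checkpoint for inference (sketch)
import torch
from transformers import GPT2Tokenizer, GPT2LMHeadModel

tokenizer = GPT2Tokenizer.from_pretrained('finetuned_model')
model = GPT2LMHeadModel.from_pretrained('finetuned_model')
model.eval()

prompt = 'Once upon a time'  # hypothetical prompt
input_ids = tokenizer.encode(prompt, return_tensors='pt')
with torch.no_grad():
    output_ids = model.generate(
        input_ids,
        max_length=50,
        do_sample=True,
        top_p=0.9,
        pad_token_id=tokenizer.pad_token_id,
    )
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))

Passing pad_token_id explicitly matters here because the training script registered a custom <PAD> token; without it, generate() falls back to the EOS token and emits a warning.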