PyTorch Lightning

Sequence to Sequence Learning with Neural Networks.ipynb at master · bentrevett/pytorch-seq2seq

BERT Fine-Tuning Tutorial with PyTorch · Chris McCormick

PyTorch Tutorial

import torch
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

🤗Hugging Face

Huggingface

Torch

Dataloader

from torch.utils.data import TensorDataset
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler

BATCH_SIZE = 256

inputs = [...]  # must be a torch.Tensor (TensorDataset does not accept plain lists)
target = [...]  # must be a torch.Tensor with the same first dimension as inputs

dataset = TensorDataset(inputs, target)
dataloader = DataLoader(dataset,
                        sampler=RandomSampler(dataset), # use SequentialSampler(dataset) for dev/test
                        batch_size=BATCH_SIZE)
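
TensorDataset stores the tensors in the order they are passed, and the DataLoader yields each batch as a list of tensors in that same order. A minimal sketch with made-up data (the 1000×10 shape and the toy regression target are purely illustrative):

import torch
from torch.utils.data import TensorDataset, DataLoader, RandomSampler

# toy data: 1000 examples, 10 features each, one regression target per example
inputs = torch.randn(1000, 10)
target = torch.randn(1000)

dataset = TensorDataset(inputs, target)
dataloader = DataLoader(dataset, sampler=RandomSampler(dataset), batch_size=256)

for batch in dataloader:
    batch_inputs, batch_target = batch  # same order as TensorDataset(inputs, target)
    print(batch_inputs.shape, batch_target.shape)  # torch.Size([256, 10]) torch.Size([256])
    break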

Train function

def train(model, dataloader, optimizer, criterion):
    model.train()
    epoch_loss = 0
    for step, batch in enumerate(dataloader):

        # unpack the batch (a TensorDataset yields a list of tensors) and move it to the device
        inputs = batch[0].to(device)
        target = batch[1].to(device)

        # clear gradients from the previous step
        model.zero_grad()

        # forward pass
        output = model(inputs)

        # loss (flatten output and target to matching shapes)
        output = output.view(-1)
        loss = criterion(output, target.view(-1))

        # backprop, clip gradients, and update parameters
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 1.)
        optimizer.step()

        epoch_loss += loss.item()
    return epoch_loss / len(dataloader)

def evaluate(model, dataloader, criterion):
    model.eval()  # evaluation mode: disables dropout, uses running batch-norm statistics
    epoch_loss = 0

    with torch.no_grad():  # no gradients needed for evaluation
        for step, batch in enumerate(dataloader):

            # unpack the batch and move it to the device
            inputs = batch[0].to(device)
            target = batch[1].to(device)

            # forward pass
            output = model(inputs)

            # loss
            output = output.view(-1)
            loss = criterion(output, target.view(-1))

            epoch_loss += loss.item()
    return epoch_loss / len(dataloader)

Train epoch
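
MySubModel and MyModel below stand in for whatever architecture is actually being trained; they are not defined in these notes. A minimal hypothetical sketch of the pattern (a sub-module wrapped by a parent nn.Module whose forward chains the two) could look like:

import torch
from torch import nn

class MySubModel(nn.Module):
    # hypothetical sub-module: one linear layer with a ReLU
    def __init__(self, in_dim=10, hidden_dim=32):
        super().__init__()
        self.fc = nn.Linear(in_dim, hidden_dim)

    def forward(self, x):
        return torch.relu(self.fc(x))

class MyModel(nn.Module):
    # hypothetical wrapper: takes the sub-module and adds an output head
    def __init__(self, submodel, hidden_dim=32):
        super().__init__()
        self.submodel = submodel
        self.head = nn.Linear(hidden_dim, 1)

    def forward(self, x):
        return self.head(self.submodel(x))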

import time
import numpy as np
from torch import nn
from torch.optim import Adam

# Build model from class
submodel = MySubModel(**kwargs)
model = MyModel(submodel, **kwargs)
model.to(device)

# Define loss function
criterion = nn.MSELoss()

# Optimizer (must be given the model's parameters; default learning rate unless specified)
optimizer = Adam(model.parameters())
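
# format_time is not defined in these notes; a simple helper (similar to the one in the
# McCormick BERT fine-tuning tutorial) that turns elapsed seconds into hh:mm:ss:
import datetime

def format_time(elapsed):
    # round to whole seconds and render as hh:mm:ss
    return str(datetime.timedelta(seconds=int(round(elapsed))))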

# Start training

EPOCHS = 50
best_dev_loss = np.inf
start_time = time.time()
for epoch in range(EPOCHS):
    epoch_start_time = time.time()

    train_loss = train(model, train_dataloader, optimizer, criterion)
    dev_loss = evaluate(model, dev_dataloader, criterion)
    test_loss = evaluate(model, test_dataloader, criterion)

    print(f"Epoch: {epoch}| {format_time(time.time() - epoch_start_time)}")
    print(f"=> train_loss: {train_loss:4f} \\tdev_loss: {dev_loss:4f} \\ttest_loss: {test_loss:4f}")

    if best_dev_loss > dev_loss:
        best_dev_loss = dev_loss
        torch.save(model.state_dict(), PATH + "model.pt")
        print('=> save model!')

print(f"Finish!\\n Total time{format_time(time.time() - start_time)}")