"""Sentiment classification on SST with an LSTM over pretrained GloVe embeddings.

Uses the legacy torchtext Field/BucketIterator pipeline: builds vocabularies,
trains for 20 epochs, keeps the weights with the best validation loss, and
reports loss/accuracy on the test split.
"""
import copy

import torch
from torch import nn
from torch import optim
import torchtext
from torchtext import data
from torchtext import datasets

TEXT = data.Field(sequential=True, batch_first=True, lower=True)
LABEL = data.LabelField()

# Load the Stanford Sentiment Treebank splits (downloads on first run).
train_data, val_data, test_data = datasets.SST.splits(TEXT, LABEL)

# Build vocabularies; TEXT gets pretrained 100-d GloVe vectors attached.
TEXT.build_vocab(train_data, vectors='glove.6B.100d')
LABEL.build_vocab(train_data)

# Hyperparameters.
vocab_size = len(TEXT.vocab)
label_size = len(LABEL.vocab)
# BUG FIX: the padding token is TEXT.pad_token ('<pad>'), not the empty
# string; stoi[''] fell back to the <unk> index, so real padding positions
# were never frozen/zeroed by nn.Embedding's padding_idx.
padding_idx = TEXT.vocab.stoi[TEXT.pad_token]
embedding_dim = 100  # must match the pretrained GloVe dimensionality
hidden_dim = 128

# Bucketed iterators batch similar-length examples to minimize padding.
train_iter, val_iter, test_iter = data.BucketIterator.splits(
    (train_data, val_data, test_data),
    batch_size=32)

use_cuda = torch.cuda.is_available()
device = torch.device('cuda' if use_cuda else 'cpu')


class Model(nn.Module):
    """Single-layer LSTM classifier: embedding -> LSTM -> linear head."""

    # BUG FIX: the constructor was named `init`, so nn.Module.__init__ never
    # ran and no layers were registered (instantiation below would fail);
    # renamed to __init__ and fixed the super() call likewise.
    def __init__(self, vocab_size, embedding_dim, hidden_dim, label_size,
                 padding_idx):
        super(Model, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim,
                                      padding_idx=padding_idx)
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, batch_first=True)
        self.fc = nn.Linear(hidden_dim, label_size)

    def forward(self, x):
        """Map a token-id batch (batch, seq) to class logits (batch, label_size)."""
        embedded = self.embedding(x)
        output, (hidden, cell) = self.lstm(embedded)
        # hidden[-1] is the final hidden state of the last LSTM layer for
        # each sequence in the batch.
        logits = self.fc(hidden[-1])
        return logits


model = Model(vocab_size, embedding_dim, hidden_dim, label_size, padding_idx)
model.embedding.weight.data.copy_(TEXT.vocab.vectors)  # load pretrained embeddings


def train(model, train_loader, optimizer, criterion):
    """Run one training epoch; return (mean per-example loss, accuracy)."""
    model.train()
    total_loss = 0.0
    total_correct = 0
    for batch in train_loader:
        text, labels = batch.text.to(device), batch.label.to(device)
        optimizer.zero_grad()
        logits = model(text)
        loss = criterion(logits, labels)
        loss.backward()
        optimizer.step()
        # Weight by batch size so the epoch mean is per-example, not per-batch
        # (the last batch may be smaller than batch_size).
        total_loss += loss.item() * text.size(0)
        preds = logits.argmax(dim=1)
        total_correct += (preds == labels).sum().item()
    avg_loss = total_loss / len(train_loader.dataset)
    accuracy = total_correct / len(train_loader.dataset)
    return avg_loss, accuracy


def evaluate(model, iterator, criterion):
    """Evaluate without gradients; return (mean per-example loss, accuracy)."""
    model.eval()
    total_loss = 0.0
    total_correct = 0
    with torch.no_grad():
        for batch in iterator:
            text, labels = batch.text.to(device), batch.label.to(device)
            predictions = model(text)
            # BUG FIX: the original passed batch.label (a CPU tensor, while
            # predictions may live on CUDA) to the criterion, and called an
            # undefined `accuracy` helper (NameError). Compute both metrics
            # from the device-resident tensors, mirroring train().
            loss = criterion(predictions, labels)
            total_loss += loss.item() * text.size(0)
            preds = predictions.argmax(dim=1)
            total_correct += (preds == labels).sum().item()
    return (total_loss / len(iterator.dataset),
            total_correct / len(iterator.dataset))


# Loss and optimizer.
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters())

# Move model to device before training.
model = model.to(device)

# Training loop with best-checkpoint tracking on validation loss.
best_val_loss = float('inf')
best_model = None

for epoch in range(1, 21):
    train_loss, train_acc = train(model, train_iter, optimizer, criterion)
    val_loss, val_acc = evaluate(model, val_iter, criterion)

    # Snapshot the weights whenever validation loss improves.
    if val_loss < best_val_loss:
        best_val_loss = val_loss
        best_model = copy.deepcopy(model.state_dict())

    print(f'Epoch [{epoch}/20], Train Loss: {train_loss:.4f}, Val Loss: {val_loss:.4f}, Train Acc: {train_acc:.4f}, Val Acc: {val_acc:.4f}')

# Restore the best checkpoint before the final test evaluation.
model.load_state_dict(best_model)
test_loss, test_acc = evaluate(model, test_iter, criterion)
print(f'Test loss: {test_loss:.4f}, Test accuracy: {test_acc:.4f}')