"import copy\nimport torch\nfrom torch import nn\nfrom torch import optim\nimport torchtext\nfrom torchtext import data\nfrom torchtext import datasets\n\nTEXT = data.Field(sequential=True, batch_first=True, lower=True)\nLABEL = data.LabelField()\n\n# load data splits\ntrain_data, val_data, test_data = datasets.SST.splits(TEXT, LABEL)\n\n# build dictionary\nTEXT.build_vocab(train_data)\nLABEL.build_vocab(train_data)\n# hyperparameters\nvocab_size = len(TEXT.vocab)\nlabel_size = len(LABEL.vocab)\npadding_idx = TEXT.vocab.stoi['']\nembedding_dim = 128\nhidden_dim = 128\n\n# build iterators\ntrain_iter, val_iter, test_iter = data.BucketIterator.splits(\n (train_data, val_data, test_data), \n batch_size=32)\n# your code here\nuse_cuda = torch.cuda.is_available()\ndevice = torch.device("cuda" if use_cuda else "cpu") \n \n# Training function \ndef train(model, train_loader, optimizer, criterion, use_gpu=True): \n model.train() \n total_loss = 0.0 \n total_correct = 0 \n for batch in train_loader: \n text, labels = batch.text.to(device), batch.label.to(device) \n optimizer.zero_grad() \n logits, attention_weights = model(text)\n loss = criterion(logits, labels) \n \n loss.backward() \n optimizer.step() \n \n total_loss += loss.item() * text.size(0) \n preds = logits.argmax(dim=1) \n total_correct += (preds == labels).sum().item() \n avg_loss = total_loss / len(train_loader.dataset) \n accuracy = total_correct / len(train_loader.dataset) \n return avg_loss, accuracy \n\ndef evaluate(model, iterator, criterion):\n epoch_loss = 0\n epoch_acc = 0\n\n model.eval()\n\n with torch.no_grad():\n for batch in iterator:\n text, labels = batch.text.to(device), batch.label.to(device) \n predictions, _ = model(text)\n\n loss = criterion(predictions, batch.label)\n acc = accuracy(predictions, batch.label)\n\n epoch_loss += loss.item()\n epoch_acc += acc.item()\n return epoch_loss / len(iterator), epoch_acc / len(iterator)\n\ndef accuracy(preds, labels):\n return (preds.argmax(dim=1) == labels).float().mean()\n\n# Model definition\nclass SentimentClassifier(nn.Module):\n def init(self, vocab_size, embedding_dim, hidden_dim, output_dim):\n super(SentimentClassifier, self).init()\n self.embedding = nn.Embedding(vocab_size, embedding_dim)\n self.rnn = nn.LSTM(embedding_dim, hidden_dim)\n self.fc = nn.Linear(hidden_dim, output_dim)\n def forward(self, text):\n embedded = self.embedding(text)\n output, (hidden, cell) = self.rnn(embedded)\n logits = self.fc(hidden[-1])\n return logits\n\n# Instantiate model and optimizer\nmodel = SentimentClassifier(vocab_size, embedding_dim, hidden_dim, label_size)\noptimizer = optim.Adam(model.parameters())\ncriterion = nn.CrossEntropyLoss()\n\n# Training loop\nnum_epochs = 20\nfor epoch in range(1, num_epochs + 1):\n train_loss, train_acc = train(model, train_iter, optimizer, criterion)\n val_loss, val_acc = evaluate(model, val_iter, criterion)\n print(f'Epoch [{epoch}/{num_epochs}], Train Loss: {train_loss:.4f}, Val Loss: {val_loss:.4f}, Train Acc: {train_acc:.4f}, Val Acc: {val_acc:.4f}')\n\n# Evaluate on test set\ntest_loss, test_acc = evaluate(model, test_iter, criterion)\nprint(f'Test loss: {test_loss:.4f}, Test accuracy: {test_acc:.4f}')