import copy
import torch
from torch import nn
from torch import optim
import torchtext
from torchtext import data
from torchtext import datasets
from torchtext.vocab import GloVe

TEXT = data.Field(sequential=True, batch_first=True, lower=True)
LABEL = data.LabelField()

# load data splits
train_data, val_data, test_data = datasets.SST.splits(TEXT, LABEL)

# build vocabularies; attach the pretrained GloVe vectors to the text vocabulary
pretrained_vectors = GloVe(name='6B', dim=300)
TEXT.build_vocab(train_data, vectors=pretrained_vectors)
LABEL.build_vocab(train_data)

# hyperparameters
vocab_size = len(TEXT.vocab)
label_size = len(LABEL.vocab)
padding_idx = TEXT.vocab.stoi[TEXT.pad_token]
embedding_dim = pretrained_vectors.dim
hidden_dim = 128

# build iterators
train_iter, val_iter, test_iter = data.BucketIterator.splits(
    (train_data, val_data, test_data),
    batch_size=32)

use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")


# Training function: one full pass over the training set
def train(model, train_loader, optimizer, criterion):
    model.train()
    total_loss = 0.0
    total_correct = 0
    for batch in train_loader:
        text, labels = batch.text.to(device), batch.label.to(device)
        optimizer.zero_grad()
        logits, attention_weights = model(text)
        loss = criterion(logits, labels)

        loss.backward()
        optimizer.step()

        total_loss += loss.item() * text.size(0)
        preds = logits.argmax(dim=1)
        total_correct += (preds == labels).sum().item()
    avg_loss = total_loss / len(train_loader.dataset)
    accuracy = total_correct / len(train_loader.dataset)
    return avg_loss, accuracy


# Evaluation function: no gradient updates, averages loss/accuracy over batches
def evaluate(model, iterator, criterion):
    epoch_loss = 0
    epoch_acc = 0

    model.eval()

    with torch.no_grad():
        for batch in iterator:
            text, labels = batch.text.to(device), batch.label.to(device)
            predictions, _ = model(text)

            loss = criterion(predictions, labels)
            acc = accuracy(predictions, labels)

            epoch_loss += loss.item()
            epoch_acc += acc.item()

    return epoch_loss / len(iterator), epoch_acc / len(iterator)


def accuracy(predictions, labels):
    _, predicted_labels = torch.max(predictions, 1)
    correct = (predicted_labels == labels).float()
    accuracy = correct.sum() / len(correct)
    return accuracy


# Simple attention over the LSTM outputs: one learned score per time step
class Attention(nn.Module):
    def __init__(self, hidden_dim):
        super(Attention, self).__init__()
        self.hidden_dim = hidden_dim
        self.attention_weights = nn.Linear(hidden_dim, 1)

    def forward(self, lstm_output):
        # lstm_output: (batch, seq_len, hidden_dim)
        attention_scores = self.attention_weights(lstm_output).squeeze(2)
        attention_weights = torch.softmax(attention_scores, dim=1)
        # weighted sum of the LSTM outputs: (batch, hidden_dim)
        attention_output = torch.bmm(lstm_output.transpose(1, 2), attention_weights.unsqueeze(2)).squeeze(2)
        return attention_output, attention_weights


class RNNClassifier(nn.Module):
    def __init__(self, vocab_size, embedding_dim, hidden_dim, label_size, padding_idx):
        super(RNNClassifier, self).__init__()
        self.vocab_size = vocab_size
        self.embedding_dim = embedding_dim
        self.hidden_dim = hidden_dim
        self.label_size = label_size
        self.num_layers = 2
        self.dropout_num = 0.5
        self.bidirectional = True
        lstm_output_dim = hidden_dim * (2 if self.bidirectional else 1)

        # Embedding layer initialised from the pretrained GloVe vectors
        self.embedding = nn.Embedding.from_pretrained(TEXT.vocab.vectors, padding_idx=padding_idx)
        self.embedding_dropout = nn.Dropout(self.dropout_num)

        # LSTM layer
        self.lstm = nn.LSTM(embedding_dim, hidden_dim, num_layers=self.num_layers,
                            batch_first=True, bidirectional=self.bidirectional)
        self.lstm_dropout = nn.Dropout(self.dropout_num)

        # Attention layer over the LSTM outputs
        self.attention = Attention(lstm_output_dim)

        # Fully connected classification layer
        # (no softmax here: nn.CrossEntropyLoss expects raw logits)
        self.fc = nn.Linear(lstm_output_dim, label_size)
        self.fc_dropout = nn.Dropout(self.dropout_num)

    def zero_state(self, batch_size):
        num_directions = 2 if self.bidirectional else 1
        hidden = torch.zeros(self.num_layers * num_directions, batch_size, self.hidden_dim).to(device)
        cell = torch.zeros(self.num_layers * num_directions, batch_size, self.hidden_dim).to(device)
        return hidden, cell

    def forward(self, text):
        emb = self.embedding_dropout(self.embedding(text))

        h0, c0 = self.zero_state(text.size(0))
        output, (hn, cn) = self.lstm(emb, (h0, c0))
        output = self.lstm_dropout(output)

        attention_output, attention_weights = self.attention(output)

        output = self.fc(attention_output)
        output = self.fc_dropout(output)

        return output, attention_weights


model = RNNClassifier(vocab_size, embedding_dim, hidden_dim, label_size, padding_idx).to(device)
optimizer = optim.Adam(model.parameters(), lr=0.001)
criterion = nn.CrossEntropyLoss()
num_epochs = 20

train_losses = []
val_losses = []
train_accs = []
val_accs = []


def model_running(model, train_iter, val_iter, optimizer, criterion):
    best_val_acc = 0.0
    best_model = copy.deepcopy(model.state_dict())
    for epoch in range(1, num_epochs + 1):
        train_loss, train_acc = train(model, train_iter, optimizer, criterion)
        val_loss, val_acc = evaluate(model, val_iter, criterion)

        # Print progress
        print('Epoch [{}/{}], Train Loss: {:.4f}, Val Loss: {:.4f}, Train Acc: {:.4f}, Val Acc: {:.4f}'
              .format(epoch, num_epochs, train_loss, val_loss, train_acc, val_acc))

        # Save loss and accuracy to the lists
        train_losses.append(train_loss)
        val_losses.append(val_loss)
        train_accs.append(train_acc)
        val_accs.append(val_acc)

        # Keep a copy of the best model (by validation accuracy)
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            best_model = copy.deepcopy(model.state_dict())

    return best_model


best_model = model_running(model, train_iter, val_iter, optimizer, criterion)
model.load_state_dict(best_model)

test_loss, test_acc = evaluate(model, test_iter, criterion)
print(f"Test loss: {test_loss:.4f}, Test accuracy: {test_acc:.4f}")
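
# The lists filled by model_running() and the attention weights returned by the
# model are never inspected in the script above. Below is a minimal optional
# sketch for doing so; it is not part of the original code, it assumes
# matplotlib is installed, and the helper names (plot_training_curves,
# show_attention) are illustrative only.
import matplotlib.pyplot as plt


def plot_training_curves(train_losses, val_losses, train_accs, val_accs):
    # Two panels: loss per epoch and accuracy per epoch, train vs. validation
    epochs = range(1, len(train_losses) + 1)
    fig, (ax_loss, ax_acc) = plt.subplots(1, 2, figsize=(10, 4))

    ax_loss.plot(epochs, train_losses, label='train')
    ax_loss.plot(epochs, val_losses, label='val')
    ax_loss.set_xlabel('epoch')
    ax_loss.set_ylabel('loss')
    ax_loss.legend()

    ax_acc.plot(epochs, train_accs, label='train')
    ax_acc.plot(epochs, val_accs, label='val')
    ax_acc.set_xlabel('epoch')
    ax_acc.set_ylabel('accuracy')
    ax_acc.legend()

    fig.tight_layout()
    plt.show()


def show_attention(model, batch):
    # Print each token of the first sentence in the batch next to its attention weight
    model.eval()
    with torch.no_grad():
        text = batch.text.to(device)
        _, attn = model(text)          # attn: (batch, seq_len)
    tokens = [TEXT.vocab.itos[idx] for idx in text[0].tolist()]
    for token, weight in zip(tokens, attn[0].tolist()):
        print(f"{token:>15s}  {weight:.3f}")


plot_training_curves(train_losses, val_losses, train_accs, val_accs)
show_attention(model, next(iter(test_iter)))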