# test.py — GGNN evaluation script (reconstructed from a line-numbered paste)
  1. import torch
  2. from torch.autograd import Variable
  3. from shutil import copyfile
  4. from sklearn.metrics import classification_report, confusion_matrix, accuracy_score
  5. def test(dataloader, net, criterion, optimizer, opt):
  6. test_loss = 0
  7. correct = 0
  8. net.eval()
  9. all_targets = []
  10. all_predicted = []
  11. for i, (adj_matrix, embedding_matrix, target) in enumerate(dataloader, 0):
  12. # padding = torch.zeros(len(annotation), opt.n_node, opt.state_dim - opt.annotation_dim).double()
  13. # init_input = torch.cat((annotation, padding), 2)
  14. # init_input = torch.zeros(len(adj_matrix), opt.n_node, opt.state_dim).double()
  15. init_input = embedding_matrix
  16. if opt.cuda:
  17. init_input = init_input.cuda()
  18. adj_matrix = adj_matrix.cuda()
  19. # annotation = annotation.cuda()
  20. target = target.cuda()
  21. init_input = Variable(init_input)
  22. adj_matrix = Variable(adj_matrix)
  23. # annotation = Variable(annotation)
  24. target = Variable(target)
  25. # print(target)
  26. output = net(init_input, adj_matrix)
  27. # print(output)
  28. # test_loss += criterion(output, target).data[0]
  29. test_loss += criterion(output, target).item()
  30. pred = output.data.max(1, keepdim=True)[1]
  31. # print(pred)
  32. all_predicted.extend(pred.data.view_as(target).cpu().numpy())
  33. all_targets.extend(target.cpu().numpy())
  34. correct += pred.eq(target.data.view_as(pred)).cpu().sum()
  35. test_loss /= len(dataloader.dataset)
  36. print('Accuracy:', accuracy_score(all_targets, all_predicted))
  37. print(classification_report(all_targets, all_predicted))
  38. print(confusion_matrix(all_targets, all_predicted))
  39. print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)'.format(
  40. test_loss, correct, len(dataloader.dataset),
  41. 100. * correct / len(dataloader.dataset)))