contrast_experiment.py

#!/usr/bin/env python
# coding=utf-8
import logging
import os

import numpy as np
import torch
from sklearn import svm
from sklearn.metrics import f1_score, recall_score
from sklearn.model_selection import train_test_split  # splits data into train/test sets
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier

fileName = './contrast.log'
formatter = logging.Formatter('%(asctime)s [%(levelname)s] %(module)s: %(message)s',
                              datefmt='%m/%d/%Y %H:%M:%S')
handler = logging.FileHandler(filename=fileName, encoding="utf-8")
handler.setFormatter(formatter)
logging.basicConfig(level=logging.DEBUG, handlers=[handler])
parent_path = os.path.dirname(os.path.realpath(__file__))
grandparent_path = os.path.dirname(parent_path)
word_list_data_path_base = parent_path + "/word_list_data/"
word2index_path_base = grandparent_path + "/word2index/"
dataset_name = "航天中认自主可控众包测试练习赛"  # dataset name, used verbatim in the data file paths
max_len = 64         # maximum sequence length per text
vocab_size = 5000    # vocabulary size (unused in this script)
embedding_size = 64  # embedding dimension (unused in this script)
batch_size = 16      # batch size (unused in this script)
n_runs = 15          # number of repeated random train/test splits


def contrast():
    logging.info("Loading raw data")
    txts = np.load(word_list_data_path_base + str(dataset_name) + ".npy", allow_pickle=True)
    labels = np.load(word_list_data_path_base + str(dataset_name) + "_label.npy", allow_pickle=True)
    # Convert one-hot label vectors to integer class indices.
    labels_new = []
    for label in labels:
        label_new = 0
        for i in range(len(label)):
            label_new += i * label[i]
        labels_new.append(label_new)
    labels_new = np.array(labels_new)
    logging.info("Loading vocabulary")
    word2index_path = word2index_path_base + str(dataset_name) + ".npy"
    word2index = np.load(word2index_path, allow_pickle=True).item()
    # Encode every tokenized text as a fixed-length list of vocabulary indices.
    features = []
    for txt in txts:
        text_feature = text_to_feature(txt, word2index, max_len)
        features.append(text_feature)
    # Track the lowest and cumulative scores across runs (accuracy is in [0, 1],
    # so 100 is a safe "worst so far" sentinel).
    score_knn_lowest = 100
    score_svm_lowest = 100
    score_nb_lowest = 100
    score_knn_all = 0
    recall_knn_all = 0
    f1_knn_all = 0
    score_svm_all = 0
    recall_svm_all = 0
    f1_svm_all = 0
    score_nb_all = 0
    recall_nb_all = 0
    f1_nb_all = 0
    for i in range(n_runs):
        # Train on a 20% split and evaluate on the remaining 80%.
        train_data, test_data, train_label, test_label = train_test_split(
            features, labels_new, random_state=i, train_size=0.2, test_size=0.8)
        logging.info("Training k-nearest-neighbors classifier")
        knn_classifier = KNeighborsClassifier()
        knn_classifier.fit(train_data, train_label)
        knn_predict = knn_classifier.predict(test_data)
        recall_knn = recall_score(test_label, knn_predict, average="macro")
        f1_knn = f1_score(test_label, knn_predict, average="macro")
        score_knn = knn_classifier.score(test_data, test_label)
        if score_knn < score_knn_lowest:
            score_knn_lowest = score_knn
        score_knn_all += score_knn
        recall_knn_all += recall_knn
        f1_knn_all += f1_knn
        logging.info("KNN accuracy: {}".format(score_knn))
        logging.info("KNN recall: {}".format(recall_knn))
        logging.info("KNN f1_score: {}".format(f1_knn))
        logging.info("Training SVM classifier")
        svm_classifier = svm.SVC(C=2, kernel='rbf', gamma=10, decision_function_shape='ovr')
        svm_classifier.fit(train_data, train_label)
        svm_predict = svm_classifier.predict(test_data)
        recall_svm = recall_score(test_label, svm_predict, average="macro")
        f1_svm = f1_score(test_label, svm_predict, average="macro")
        score_svm = svm_classifier.score(test_data, test_label)
        if score_svm < score_svm_lowest:
            score_svm_lowest = score_svm
        score_svm_all += score_svm
        recall_svm_all += recall_svm
        f1_svm_all += f1_svm
        logging.info("SVM accuracy: {}".format(score_svm))
        logging.info("SVM recall: {}".format(recall_svm))
        logging.info("SVM f1_score: {}".format(f1_svm))
        logging.info("Training naive Bayes classifier")
        nb_classifier = GaussianNB()
        nb_classifier.fit(train_data, train_label)
        nb_predict = nb_classifier.predict(test_data)
        recall_nb = recall_score(test_label, nb_predict, average="macro")
        f1_nb = f1_score(test_label, nb_predict, average="macro")
        score_nb = nb_classifier.score(test_data, test_label)
        if score_nb < score_nb_lowest:
            score_nb_lowest = score_nb
        score_nb_all += score_nb
        recall_nb_all += recall_nb
        f1_nb_all += f1_nb
        logging.info("Naive Bayes accuracy: {}".format(score_nb))
        logging.info("Naive Bayes recall: {}".format(recall_nb))
        logging.info("Naive Bayes f1_score: {}".format(f1_nb))
    logging.info("KNN lowest accuracy: {}".format(score_knn_lowest))
    logging.info("SVM lowest accuracy: {}".format(score_svm_lowest))
    logging.info("Naive Bayes lowest accuracy: {}".format(score_nb_lowest))
    logging.info("KNN mean accuracy: {}".format(score_knn_all / n_runs))
    logging.info("SVM mean accuracy: {}".format(score_svm_all / n_runs))
    logging.info("Naive Bayes mean accuracy: {}".format(score_nb_all / n_runs))
    logging.info("KNN mean recall: {}".format(recall_knn_all / n_runs))
    logging.info("SVM mean recall: {}".format(recall_svm_all / n_runs))
    logging.info("Naive Bayes mean recall: {}".format(recall_nb_all / n_runs))
    logging.info("KNN mean f1_score: {}".format(f1_knn_all / n_runs))
    logging.info("SVM mean f1_score: {}".format(f1_svm_all / n_runs))
    logging.info("Naive Bayes mean f1_score: {}".format(f1_nb_all / n_runs))
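
# The running totals above could equivalently be collected per run and
# aggregated with numpy at the end; a minimal sketch of the same bookkeeping
# (illustrative variable names, not the script's actual structure):
#
#     knn_scores = []
#     for i in range(n_runs):
#         ...
#         knn_scores.append(score_knn)  # one entry per split
#     logging.info("KNN mean accuracy: {}".format(np.mean(knn_scores)))
#     logging.info("KNN lowest accuracy: {}".format(np.min(knn_scores)))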


def text_to_feature(text, word2index, max_len):
    """Map a tokenized text to a fixed-length list of vocabulary indices."""
    feature = []
    for word in text:
        if word in word2index:
            feature.append(word2index[word])
        else:
            feature.append(word2index["<unk>"])  # out-of-vocabulary words
        if len(feature) == max_len:
            break  # truncate texts longer than max_len
    # Right-pad shorter texts so every feature vector has length max_len.
    feature = feature + [word2index["<pad>"]] * (max_len - len(feature))
    return feature
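
# A minimal usage sketch, assuming a toy vocabulary (the real word2index is
# loaded from the .npy file above and is expected to contain "<pad>" and
# "<unk>" entries):
#
#     word2index = {"<pad>": 0, "<unk>": 1, "hello": 2}
#     text_to_feature(["hello", "world"], word2index, max_len=4)
#     # -> [2, 1, 0, 0]   ("world" is out of vocabulary; padded to length 4)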


def calculate_bi_standards(name):
    # Unfinished stub: loads a saved torch model; the metric computation it is
    # meant to perform has not been implemented yet.
    model = torch.load(name)
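
# For reference, a minimal sketch (an assumption about the preprocessing step,
# not this repository's actual code) of how a compatible word2index vocabulary
# could be built and saved so that np.load(...).item() above recovers the dict:
#
#     from collections import Counter
#     counts = Counter(word for txt in txts for word in txt)
#     vocab = ["<pad>", "<unk>"] + [w for w, _ in counts.most_common(vocab_size - 2)]
#     word2index = {w: i for i, w in enumerate(vocab)}
#     np.save(word2index_path_base + dataset_name + ".npy", word2index)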


if __name__ == "__main__":
    contrast()