patch_prediction_extractor.py 4.8 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134
# -*-coding:UTF-8-*-
"""get prediction for each backend
"""
import sys
import os
sys.path.append("../")  # make the project's `scripts` package importable from the parent dir
import numpy as np
import pickle
import argparse
import configparser
from scripts.tools.utils import DataUtils
from scripts.logger.lemon_logger import Logger
import warnings

# Module-wide logger shared by the helper functions and the __main__ script below.
main_logger = Logger()
  15. def custom_objects():
  16. def no_activation(x):
  17. return x
  18. def leakyrelu(x):
  19. import keras.backend as K
  20. return K.relu(x, alpha=0.01)
  21. objects = {}
  22. objects['no_activation'] = no_activation
  23. objects['leakyrelu'] = leakyrelu
  24. return objects
  25. def _get_prediction(bk, x, y, model_path,batch_size):
  26. """
  27. Get prediction of models on different backends
  28. """
  29. test_x, test_y = x[:flags.test_size],y[:flags.test_size]
  30. predict_model = keras.models.load_model(model_path,custom_objects=custom_objects())
  31. # predict_model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
  32. main_logger.info("INFO:load model and compile done!")
  33. res = predict_model.predict(test_x,batch_size=batch_size)
  34. root_dir = model_path.split("origin_model")[0]
  35. npy_path = root_dir + 'res.npy' # 保存模型预测结果的路径,patch_prediction_extractor.py中的44行改成一样的路径
  36. #test_x:测试集
  37. #batch_size:单次训练数据样本大小
  38. np.save(npy_path,res)
  39. #把预测结果保存到本地
  40. main_logger.info("SUCCESS:Get prediction for {} successfully on {}!".format(mut_model_name,bk))
  41. """Store prediction result to redis"""
  42. # redis_conn.hset("prediction_{}".format(mut_model_name),bk,pickle.dumps(res))
if __name__ == "__main__":
    """Parser of command args"""
    parse = argparse.ArgumentParser()
    parse.add_argument("--backend", type=str, help="name of backends")
    parse.add_argument("--exp", type=str, help="experiments identifiers")
    parse.add_argument("--test_size", type=int, help="amount of testing image")
    parse.add_argument("--model", type=str, help="path of the model to predict")
    #parse.add_argument("--redis_db", type=int)
    parse.add_argument("--config_name", type=str)
    # parse_known_args: tolerate extra args passed by the orchestrating process.
    flags, unparsed = parse.parse_known_args(sys.argv[1:])

    """Load Configuration"""
    warnings.filterwarnings("ignore")
    lemon_cfg = configparser.ConfigParser()
    # lemon_cfg.read(f"./config/{flags.config_name}")
    # Experiment root dir: prefix of the model path before 'origin_model'.
    root_dir = flags.model.split("origin_model")[0]
    # Config file is resolved relative to the parent of the current working dir.
    cfg_path = os.path.join(os.path.dirname(os.getcwd()), "config", flags.config_name)
    lemon_cfg.read(cfg_path)
    # Former redis-based result storage (now replaced by the local .npy file):
    #pool = redis.ConnectionPool(host=lemon_cfg['redis']['host'], port=lemon_cfg['redis']['port'],db=flags.redis_db)
    #redis_conn = redis.Redis(connection_pool=pool)
    parameters = lemon_cfg['parameters']
    # gpu_ids = parameters['gpu_ids']
    # gpu_list = parameters['gpu_ids'].split(",")

    """Init cuda"""
    #os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    #os.environ["CUDA_VISIBLE_DEVICES"] = gpu_ids
    warnings.filterwarnings("ignore")
    batch_size = 32  # default; overridden per backend below

    """Switch backend"""
    # NOTE: KERAS_BACKEND must be set BEFORE `import keras` anywhere below;
    # the statement order in this section is load-bearing.
    bk_list = ['tensorflow', 'mxnet']
    bk = flags.backend
    os.environ['KERAS_BACKEND'] = bk
    os.environ['PYTHONHASHSEED'] = '0'  # determinism across runs
    if bk == 'tensorflow':
        os.environ["TF_CPP_MIN_LOG_LEVEL"] = '2'  # only show warnings and errors
        import tensorflow as tf
        main_logger.info(tf.__version__)
        batch_size = 128
        import keras
    if bk == 'theano':
        # Former GPU configuration for theano, kept for reference:
        # if len(gpu_list) == 2:
        #     os.environ['THEANO_FLAGS'] = f"device=cuda,contexts=dev{gpu_list[0]}->cuda{gpu_list[0]};dev{gpu_list[1]}->cuda{gpu_list[1]}," \
        #                                  f"force_device=True,floatX=float32,lib.cnmem=1"
        # else:
        #     os.environ['THEANO_FLAGS'] = f"device=cuda,contexts=dev{gpu_list[0]}->cuda{gpu_list[0]}," \
        #                                  f"force_device=True,floatX=float32,lib.cnmem=1"
        import theano as th
        import keras
        main_logger.info(th.__version__)
    if bk == "cntk":
        #from cntk.device import try_set_default_device,gpu
        #try_set_default_device(gpu(int(gpu_list[0])))
        import cntk as ck
        main_logger.info(ck.__version__)
        import keras
    if bk == "mxnet":
        import mxnet as mxnet
        main_logger.info(f"mxnet_version {mxnet.__version__}")
        import keras
        batch_size = 16
    from keras import backend as K
    try:
        """Get model prediction"""
        main_logger.info("INFO:Using {} as backend for states extraction| {} is wanted".format(K.backend(), bk))
        # Load the dataset for this experiment and preprocess it.
        x, y = DataUtils.get_data_by_exp(flags.exp)
        mut_model_name = os.path.split(flags.model)[-1]
        print(flags.model)
        _get_prediction(bk=bk, x=x, y=y, model_path=flags.model, batch_size=batch_size)
    except Exception:
        # Any failure is fatal for this extractor process; report the traceback
        # and exit non-zero so the orchestrator can detect the crash.
        import traceback
        traceback.print_exc()
        sys.exit(-1)