mutate_lemon.py

# -*-coding:UTF-8-*-
import csv
from itertools import *
import keras
import json
import networkx as nx
import sys
# sys.path.append("../")
import os
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from scripts.logger.lemon_logger import Logger
from scripts.tools.mutator_selection_logic import Roulette, MCMC
from scripts.mutation.model_mutation_generators import *
import argparse
import ast
import numpy as np
from scripts.mutation.mutation_utils import *
import pickle
from scripts.tools import utils
from scripts.tools.utils import ModelUtils
import shutil
import re
import datetime
import configparser
import warnings
import math
import psutil

lines = 0
# np.random.seed(20200501)
warnings.filterwarnings("ignore")
os.environ["TF_CPP_MIN_LOG_LEVEL"] = '2'
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = ""


def partially_nan_or_inf(predictions, bk_num):  # check whether the results contain Inf or NaN
    """
    Check if there is NaN in the result
    """
    def get_nan_num(nds):
        _nan_num = 0
        for nd in nds:
            if np.isnan(nd).any() or np.isinf(nd).any():
                _nan_num += 1
        return _nan_num

    if len(predictions) == bk_num:
        for input_predict in zip(*predictions):
            nan_num = get_nan_num(input_predict)
            if 0 < nan_num < bk_num:
                return True
            else:
                continue
        return False
    else:
        raise Exception("wrong backend amounts")
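
# Illustrative usage (added for clarity, not part of the original script): `predictions` holds one
# prediction collection per backend, and a "partial" NaN means some, but not all, backends produce
# NaN/Inf for the same input, which LEMON treats as a potential NaN bug. A minimal sketch with two
# backends and two inputs:
#   p_bk1 = [np.array([0.1, 0.9]), np.array([np.nan, np.nan])]
#   p_bk2 = [np.array([0.1, 0.9]), np.array([0.2, 0.8])]
#   partially_nan_or_inf([p_bk1, p_bk2], bk_num=2)  # True: input 1 is NaN on only one backend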


def get_selector_by_startegy_name(mutator_s, mutant_s):
    mutant_strategy_dict = {"ROULETTE": Roulette}
    mutator_strategy_dict = {"MCMC": MCMC}
    return mutator_strategy_dict[mutator_s], mutant_strategy_dict[mutant_s]


def save_mutate_history(selector, invalid_history: dict, mutant_history: list):
    mutator_history_path = os.path.join(experiment_dir, "mutator_history.csv")
    mutant_history_path = os.path.join(experiment_dir, "mutant_history.txt")
    with open(mutator_history_path, "w+") as fw:
        fw.write("Name,Success,Invalid,Total\n")
        for op in invalid_history.keys():
            mtrs = selector.mutators[op]
            invalid_cnt = invalid_history[op]
            fw.write("{},{},{},{}\n".format(op, mtrs.delta_bigger_than_zero, invalid_cnt, mtrs.total))
    with open(mutant_history_path, "w+") as fw:
        for mutant in mutant_history:
            fw.write("{}\n".format(mutant))


def is_nan_or_inf(t):
    if math.isnan(t) or math.isinf(t):
        return True
    else:
        return False


def continue_checker(**run_stat):  # decide whether the algorithm has reached its stop condition
    start_time = run_stat['start_time']
    time_limitation = run_stat['time_limit']
    cur_counters = run_stat['cur_counters']
    counters_limit = run_stat['counters_limit']
    s_mode = run_stat['stop_mode']

    # time-based limit
    if s_mode == 'TIMING':
        hours, minutes, seconds = utils.ToolUtils.get_HH_mm_ss(datetime.datetime.now() - start_time)
        total_minutes = hours * 60 + minutes
        mutate_logger.info(f"INFO: Mutation progress: {total_minutes}/{time_limitation} Minutes!")
        if total_minutes < time_limitation:
            return True
        else:
            return False
    # counter-based limit: stop once size(models) reaches N
    elif s_mode == 'COUNTER':
        if cur_counters < counters_limit:
            mutate_logger.info("INFO: Mutation progress {}/{}".format(cur_counters + 1, counters_limit))
            return True
        else:
            return False
    else:
        raise Exception(f"Error! Stop Mode {s_mode} not Found!")
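
# Illustrative call (added for clarity, not part of the original script), assuming the 'COUNTER'
# stop mode configured in the conf file:
#   run_stat = {'start_time': datetime.datetime.now(), 'time_limit': 360,
#               'cur_counters': 0, 'counters_limit': 100, 'stop_mode': 'COUNTER'}
#   continue_checker(**run_stat)  # True while fewer than 100 valid mutants have been generated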


def calc_inner_div(model):
    graph = nx.DiGraph()
    for layer in model.layers:
        graph.add_node(layer.name)
        for inbound_node in layer._inbound_nodes:
            if inbound_node.inbound_layers:
                for parent_layer in inbound_node.inbound_layers:
                    graph.add_edge(parent_layer.name, layer.name)
    longest_path = nx.dag_longest_path(graph)
    return len(longest_path) / len(graph)
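
# Note (added for clarity): calc_inner_div builds a DAG of layer connectivity and returns the
# length of the longest layer chain (in nodes) divided by the total number of nodes, i.e. a rough
# "depth versus size" diversity score in (0, 1]. For a chain-structured (purely sequential) graph
# the ratio is 1; branching architectures score lower.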


def _generate_and_predict(res_dict, filename, mutate_num, mutate_ops, test_size, exp, backends):
    # main algorithm function
    """
    Generate models using mutate operators and store them
    """
    mutate_op_history = {k: 0 for k in mutate_ops}
    mutate_op_invalid_history = {k: 0 for k in mutate_ops}
    mutant_history = []
    # get mutator selection strategy
    if 'svhn' in exp or 'fashion2' in exp:
        origin_model_name = "{}_origin0.hdf5".format(exp)
    else:
        origin_model_name = "{}_origin0.h5".format(exp)
    # the initial seed pool Ms contains only this origin model
    root_dir = os.path.dirname(os.getcwd())
    origin_save_path = os.path.join(mut_dir, origin_model_name)
    mutator_selector_func, mutant_selector_func = get_selector_by_startegy_name(mutator_strategy, mutant_strategy)
    # [origin_model_name] means seed pool only contains initial model at beginning.
    mutator_selector, mutant_selector = mutator_selector_func(mutate_ops), mutant_selector_func([origin_model_name],
                                                                                                capacity=mutate_num + 1)
    # MCMC, Roulette
    shutil.copy(src=filename, dst=origin_save_path)
    origin_model_status, res_dict, accumulative_inconsistency, _ = get_model_prediction(res_dict,
                                                                                        origin_save_path,
                                                                                        origin_model_name, exp,
                                                                                        test_size, backends)
    if not origin_model_status:
        mutate_logger.error(f"Origin model {exp} crashed on some backends! LEMON would skip it")
        sys.exit(-1)

    last_used_mutator = None
    last_inconsistency = accumulative_inconsistency  # ACC
    mutant_counter = 0

    start_time = datetime.datetime.now()
    order_inconsistency_dict = {}
    run_stat = {'start_time': start_time, 'time_limit': time_limit, 'cur_counters': mutant_counter,
                'counters_limit': mutate_num, 'stop_mode': stop_mode}

    # keep iterating while the stop condition is not met
    while continue_checker(**run_stat):
        global model_num
        if model_num == mutate_num:
            break
        picked_seed = utils.ToolUtils.select_mutant(mutant_selector)  # roulette selection of a seed model (pseudocode lines 3-14)
        selected_op = utils.ToolUtils.select_mutator(mutator_selector,
                                                     last_used_mutator=last_used_mutator)  # MCMC selection of a mutation operator (pseudocode lines 15-20)
        mutate_op_history[selected_op] += 1
        last_used_mutator = selected_op
        mutator = mutator_selector.mutators[selected_op]  # mutation operator object
        mutant = mutant_selector.mutants[picked_seed]  # seed model object

        if 'svhn' in picked_seed or 'fashion2' in picked_seed:
            new_seed_name = "{}-{}{}.hdf5".format(picked_seed[:-5], selected_op, mutate_op_history[selected_op])
        else:
            new_seed_name = "{}-{}{}.h5".format(picked_seed[:-3], selected_op, mutate_op_history[selected_op])  # name of the new mutant
        # seed name would not be duplicate
        if new_seed_name not in mutant_selector.mutants.keys():
            # pseudocode line 22: mutants are named after the selected seed and operator,
            # so a duplicate name means this model has already been generated
            new_seed_path = os.path.join(mut_dir, new_seed_name)
            picked_seed_path = os.path.join(mut_dir, picked_seed)
            mutate_st = datetime.datetime.now()

            model_mutation_generators = root_dir + "/scripts/mutation/model_mutation_generators.py"
            mutate_status = os.system("{}/lemon/bin/python -u {} --model {} "
                                      "--mutate_op {} --save_path {} --mutate_ratio {}".format(python_prefix,
                                                                                               model_mutation_generators,
                                                                                               picked_seed_path,
                                                                                               selected_op,
                                                                                               new_seed_path,
                                                                                               flags.mutate_ratio))
            # apply the selected mutation operator in a subprocess (pseudocode line 21)
            mutate_et = datetime.datetime.now()
            mutate_dt = mutate_et - mutate_st
            h, m, s = utils.ToolUtils.get_HH_mm_ss(mutate_dt)
            mutate_logger.info("INFO:Mutate Time Used on {} : {}h, {}m, {}s".format(selected_op, h, m, s))
            # mutation status code is successful
            if mutate_status == 0:  # the mutation subprocess finished successfully
                mutant.selected += 1
                mutator.total += 1
                # execute this model on all platforms
                predict_status, res_dict, accumulative_inconsistency, model_outputs = \
                    get_model_prediction(res_dict, new_seed_path, new_seed_name, exp, test_size, backends)
                # compute ACC(m)
                if predict_status:
                    mutant_history.append(new_seed_name)
                    # pseudocode lines 23-25
                    print('type:', type(model_outputs))
                    print('model_outputs:', model_outputs)
                    if utils.ModelUtils.is_valid_model(inputs_backends=model_outputs, backends_nums=len(backends)):
                        delta = accumulative_inconsistency - last_inconsistency  # i.e. ACC(m) - ACC(s)
                        # The two checks below are effectively always true, since the mutator dict
                        # only contains MCMC and the mutant dict only contains ROULETTE.
                        if mutator_strategy == 'MCMC':
                            mutator.delta_bigger_than_zero = mutator.delta_bigger_than_zero + 1 \
                                if delta > 0 else mutator.delta_bigger_than_zero
                        if mutant_strategy == 'ROULETTE' and delta > 0:
                            # when size >= capacity:
                            # random_mutant & Roulette would drop one and add new one
                            if mutant_selector.is_full():
                                mutant_selector.pop_one_mutant()
                            mutant_selector.add_mutant(new_seed_name)  # the mutant amplifies the inconsistency, i.e. ACC(m) > ACC(s), so add it to the seed pool
                            last_inconsistency = accumulative_inconsistency  # pseudocode line 29

                        mutate_logger.info("SUCCESS:{} pass testing!".format(new_seed_name))
                        mutant_counter += 1
                    else:
                        mutate_op_invalid_history[selected_op] += 1
                        mutate_logger.error("Invalid model Found!")
                else:
                    mutate_logger.error("Crashed or NaN model Found!")
            else:
                mutate_logger.error("Exception raised when mutate {} with {}".format(picked_seed, selected_op))

            mutate_logger.info("Mutated op used history:")
            mutate_logger.info(mutate_op_history)
            mutate_logger.info("Invalid mutant generated history:")
            mutate_logger.info(mutate_op_invalid_history)
        run_stat['cur_counters'] = mutant_counter

    save_mutate_history(mutator_selector, mutate_op_invalid_history, mutant_history)

    # calc_cov = CoverageCalculatornew(all_json_path, api_config_pool_path)
    # lines = 0
    # for file in os.listdir(folder_path):
    #     if file == 'total.json': continue
    #     file_path = os.path.join(folder_path, file)
    #     calc_cov.load_json(file_path)
    #     with open(file_path, 'r') as sub_json:
    #         sub_info = json.load(sub_json)
    #     outer_div = len(tar_set - set(sub_info['layer_type']))
    # input_cov, config_cov, api_cov, op_type_cov, op_num_cov, edge_cov = calc_cov.cal_coverage()
    # with open(output_path, 'a+', newline='') as fi:
    #     writer = csv.writer(fi)
    #     head = ['Layer Input Coverage', 'Layer Parameter Diversity', 'Layer Sequence Diversity',
    #             'Operator Type Coverage', 'Operator Num Coverage', 'Edge Coverage', 'Accumulative inconsistency']
    #     if not lines:
    #         writer.writerow(head)
    #     lines += 1
    #     printlist = [input_cov, config_cov, api_cov, op_type_cov, op_num_cov, edge_cov,
    #                  acc[lines]]
    #     writer.writerow(printlist)
    return res_dict


def generate_metrics_result(res_dict, predict_output, model_idntfr):  # compute ACC
    mutate_logger.info("Generating Metrics Result")
    accumulative_incons = 0
    backends_pairs_num = 0
    # Compare results pair by pair
    for pair in combinations(predict_output.items(), 2):  # every pair of backends
        backends_pairs_num += 1
        backend1, backend2 = pair
        bk_name1, prediction1 = backend1
        bk_name2, prediction2 = backend2
        bk_pair = "{}_{}".format(bk_name1, bk_name2)
        for metrics_name, metrics_result_dict in res_dict.items():
            metrics_func = utils.MetricsUtils.get_metrics_by_name(metrics_name)  # metric function, e.g. D_MAD
            # metrics_results in list type
            metrics_results = metrics_func(prediction1, prediction2, y_test[:flags.test_size])
            # there are test_size inputs in total, so metrics_results is a list of test_size per-input distances
            # ACC -> float: The sum of all inputs under all backends
            accumulative_incons += sum(metrics_results)  # ACC = sum over backend pairs and inputs
            for input_idx, delta in enumerate(metrics_results):
                delta_key = "{}_{}_{}_input{}".format(model_idntfr, bk_name1, bk_name2, input_idx)
                metrics_result_dict[delta_key] = delta
    mutate_logger.info(f"Accumulative Inconsistency: {accumulative_incons}")
    return res_dict, accumulative_incons
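
# Worked example (added for clarity, with made-up numbers): with three backends there are three
# backend pairs; if a metric returns [0.1, 0.2] for two test inputs on every pair, the accumulative
# inconsistency ACC is 3 * (0.1 + 0.2) = 0.9, and each per-input distance is stored under a key
# such as "lenet5-mnist_origin0_tensorflow_mxnet_input0".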


def generate_gini_result(predict_output, backends):
    gini_res = {bk: 0 for bk in backends}
    for pair in predict_output.items():
        bk_name, prediction = pair
        gini_res[bk_name] = utils.MetricsUtils.get_gini_mean(prediction)
    return gini_res


def generate_theta(predict_output, backends):
    theta_res = {bk: 0 for bk in backends}
    for pair in predict_output.items():
        bk_name, prediction = pair
        theta_res[bk_name] = utils.MetricsUtils.get_theta_mean(prediction, y_test[:flags.test_size])
    return theta_res


SHAPE_SPACE = 5
model_num = 0


def get_model_prediction(res_dict, model_path, model_name, exp, test_size, backends):
    # compute ACC
    """
    Get model prediction on different backends and calculate distance by metrics
    """
    root_dir = model_path.split("origin_model")[0]
    # path where each backend stores its predictions; line 44 of patch_prediction_extractor.py
    # must be changed to use the same path
    npy_path = root_dir + 'res.npy'
    predict_output = {b: [] for b in backends}
    model_idntfr = model_name[:-3]
    all_backends_predict_status = True
    for bk in backends:
        python_bin = f"{python_prefix}/{bk}/bin/python"
        predict_st = datetime.datetime.now()
        # run the prediction with each backend library in its own environment
        pre_status_bk = os.system(f"{python_bin} -u -m patch_prediction_extractor --backend {bk} "
                                  f"--exp {exp} --test_size {test_size} --model {model_path} "
                                  f"--config_name {flags.config_name}")
        predict_et = datetime.datetime.now()
        predict_td = predict_et - predict_st
        h, m, s = utils.ToolUtils.get_HH_mm_ss(predict_td)
        mutate_logger.info("Prediction Time Used on {} : {}h, {}m, {}s".format(bk, h, m, s))
        # If no exception is thrown, save the prediction result
        if pre_status_bk == 0:  # prediction succeeded; load and keep the result
            # data = pickle.loads(redis_conn.hget("prediction_{}".format(model_name), bk))
            data = np.load(npy_path)
            predict_output[bk] = data
            # print(data)
        # record the crashed backend
        else:
            all_backends_predict_status = False
            mutate_logger.error("{} crash on backend {} when predicting ".format(model_name, bk))

    status = False
    accumulative_incons = None

    # run ok on all platforms
    if all_backends_predict_status:  # every backend ran and stored a result; now check the results for errors
        predictions = list(predict_output.values())
        res_dict, accumulative_incons = generate_metrics_result(res_dict=res_dict, predict_output=predict_output,
                                                                model_idntfr=model_idntfr)
        # compute ACC (measures how inconsistent the predictions are across backends)
        # gini_res = generate_gini_result(predict_output=predict_output, backends=backends)
        # theta = generate_theta(predict_output=predict_output, backends=backends)

        # import csv
        # csvfile = open(r"D:\lemon_outputs\result\mobilenet.1.00.224-imagenet\tensorflow\5.csv", 'a+', newline='')
        # write = csv.writer(csvfile)
        # write.writerow([accumulative_incons, gini_res['tensorflow'], theta['tensorflow']])
        # csvfile.close()
        #
        # csvfile = open(r"D:\lemon_outputs\result\mobilenet.1.00.224-imagenet\mxnet\5.csv", 'a+', newline='')
        # write = csv.writer(csvfile)
        # write.writerow([accumulative_incons, gini_res['mxnet'], theta['mxnet']])
        # csvfile.close()
        # compute gini

        # If all backends are working fine, check if there exists NaN or INF in the result
        # `accumulative_incons` is nan or inf --> NaN or INF in results
        if is_nan_or_inf(accumulative_incons):
            # has NaN on partial backends
            if partially_nan_or_inf(predictions, len(backends)):
                nan_model_path = os.path.join(nan_dir, f"{model_idntfr}_NaN_bug.h5")
                mutate_logger.error("Error: Found one NaN bug. move NAN model")
            # has NaN on all backends --> not a NaN bug
            else:
                nan_model_path = os.path.join(nan_dir, f"{model_idntfr}_NaN_on_all_backends.h5")
                mutate_logger.error("Error: Found one NaN Model on all libraries. move NAN model")
            shutil.move(model_path, nan_model_path)
        else:  # No NaN or INF on any backend
            # print(model_path)
            for bk in backends:
                python_bin = f"{python_prefix}/{bk}/bin/python"
                os.system(f"{python_bin} -u -m model_to_txt --backend {bk} --model_path {model_path} "
                          f"--root_dir {root_dir}")
                os.system(f"{python_bin} -u -m draw_result --backend {bk} --model_path {model_path}")
            # if 'svhn' in model_name or 'fashion2' in model_name:
            #     file_path = os.path.join(folder_path, model_path.split("\\")[-1][:-5] + '.json')
            # else:
            #     file_path = os.path.join(folder_path, model_path.split("\\")[-1][:-3] + '.json')
            # union_json(file_path, all_json_path)
            # model_now = keras.models.load_model(model_path, custom_objects=custom_objects())
            # inner_div[model_num] = calc_inner_div(model_now)
            # with open(file_path, 'r') as sub_json:
            #     sub_info = json.load(sub_json)
            # if len(set(sub_info['layer_type'])) > len(tar_set):
            #     tar_set = set(sub_info['layer_type'])
            mutate_logger.info("Saving prediction")
            with open("{}/prediction_{}.pkl".format(inner_output_dir, model_idntfr), "wb+") as f:
                pickle.dump(predict_output, file=f)
            status = True
    # save crashed model
    else:
        mutate_logger.error("Error: move crash model")
        crash_model_path = os.path.join(crash_dir, model_name)
        shutil.move(model_path, crash_model_path)

    return status, res_dict, accumulative_incons, predict_output
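
# Note (added for clarity): get_model_prediction returns
# (status, res_dict, accumulative_incons, predict_output). status is True only when every backend
# produced a NaN/Inf-free prediction; otherwise the model file has already been moved to the
# crash/ or nan/ directory, and accumulative_incons is None when some backend crashed.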


if __name__ == "__main__":
    starttime = datetime.datetime.now()
    """
    Parser of command args.
    It could make mutate_lemon.py run independently without relying on mutation_executor.py
    """
    parse = argparse.ArgumentParser()
    parse.add_argument("--is_mutate", type=ast.literal_eval, default=False,
                       help="parameter to determine mutation option")
    parse.add_argument("--mutate_op", type=str, nargs='+',
                       choices=['WS', 'GF', 'NEB', 'NAI', 'NS', 'ARem', 'ARep', 'LA', 'LC', 'LR', 'LS', 'MLA'],
                       help="mutation operators to use")
    parse.add_argument("--model", type=str, help="relative path of model file (from root dir)")
    parse.add_argument("--output_dir", type=str, help="relative path of output dir (from root dir)")
    parse.add_argument("--backends", type=str, nargs='+', help="list of backends")
    parse.add_argument("--mutate_num", type=int, help="number of variant models generated by each mutation operator")
    parse.add_argument("--mutate_ratio", type=float, help="ratio of mutation")
    parse.add_argument("--exp", type=str, help="experiment identifier")
    parse.add_argument("--test_size", type=int, help="number of testing images")
    parse.add_argument("--config_name", type=str, help="config name")
    flags, unparsed = parse.parse_known_args(sys.argv[1:])
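
    # Example standalone invocation (illustrative only; paths, backend names and values depend on
    # your setup and conf file):
    #   python -u mutate_lemon.py --mutate_op WS GF NAI --model origin_model/lenet5-mnist_origin0.h5 \
    #       --output_dir lemon_outputs --backends tensorflow mxnet --mutate_num 100 \
    #       --mutate_ratio 0.3 --exp lenet5-mnist --test_size 10 --config_name demo.conf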

    warnings.filterwarnings("ignore")
    lemon_cfg = configparser.ConfigParser()
    # lemon_cfg.read(f".\config\{flags.config_name}")
    cfg_path = os.path.join(os.path.dirname(os.getcwd()), "config", flags.config_name)
    lemon_cfg.read(cfg_path)
    # lemon_cfg.read(f"config/demo.conf")
    time_limit = lemon_cfg['parameters'].getint("time_limit")
    mutator_strategy = lemon_cfg['parameters'].get("mutator_strategy").upper()
    mutant_strategy = lemon_cfg['parameters'].get("mutant_strategy").upper()
    stop_mode = lemon_cfg['parameters'].get("stop_mode").upper()
    alpha = lemon_cfg['parameters'].getfloat("alpha")

    mutate_logger = Logger()
    # pool = redis.ConnectionPool(host=lemon_cfg['redis']['host'], port=lemon_cfg['redis']['port'],
    #                             db=lemon_cfg['redis'].getint('redis_db'))
    # redis_conn = redis.Redis(connection_pool=pool)
    # for k in redis_conn.keys():
    #     if flags.exp in k.decode("utf-8"):
    #         redis_conn.delete(k)

    # exp: like lenet5-mnist
    experiment_dir = os.path.join(flags.output_dir, flags.exp)
    mut_dir = os.path.join(experiment_dir, "mut_model")
    crash_dir = os.path.join(experiment_dir, "crash")
    nan_dir = os.path.join(experiment_dir, "nan")
    inner_output_dir = os.path.join(experiment_dir, "inner_output")
    metrics_result_dir = os.path.join(experiment_dir, "metrics_result")

    x, y = utils.DataUtils.get_data_by_exp(flags.exp)  # load the dataset specified in the conf file and convert its format
    x_test, y_test = x[:flags.test_size], y[:flags.test_size]
    pool_size = lemon_cfg['parameters'].getint('pool_size')
    python_prefix = lemon_cfg['parameters']['python_prefix'].rstrip("\\")

    try:  # run the main algorithm
        metrics_list = lemon_cfg['parameters']['metrics'].split(" ")  # e.g. D_MAD
        lemon_results = {k: dict() for k in metrics_list}
        lemon_results = _generate_and_predict(lemon_results, flags.model, flags.mutate_num, flags.mutate_op,
                                              flags.test_size, flags.exp, flags.backends)
        with open("{}/{}_lemon_results.pkl".format(experiment_dir, flags.exp), "wb+") as f:
            pickle.dump(lemon_results, file=f)
        utils.MetricsUtils.generate_result_by_metrics(metrics_list, lemon_results, metrics_result_dir, flags.exp)
    except Exception as e:
        mutate_logger.exception(sys.exc_info())

    from keras import backend as K
    K.clear_session()

    endtime = datetime.datetime.now()
    time_delta = endtime - starttime
    h, m, s = utils.ToolUtils.get_HH_mm_ss(time_delta)
    mutate_logger.info("Mutation process is done: Time used: {} hour,{} min,{} sec".format(h, m, s))