@@ -5,11 +5,9 @@ import keras
 import json
 import networkx as nx
 import sys
-
 # sys.path.append("../")
 import os
-
-sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
+sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))

 from scripts.logger.lemon_logger import Logger
 from scripts.tools.mutator_selection_logic import Roulette, MCMC
@@ -34,7 +32,7 @@ import math
 lines = 0
 # np.random.seed(20200501)
 warnings.filterwarnings("ignore")
-os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
+os.environ["TF_CPP_MIN_LOG_LEVEL"] = '2'
 os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
 os.environ["CUDA_VISIBLE_DEVICES"] = ""
 import psutil
@@ -78,11 +76,7 @@ def save_mutate_history(selector, invalid_history: dict, mutant_history: list):
         for op in invalid_history.keys():
             mtrs = selector.mutators[op]
             invalid_cnt = invalid_history[op]
-            fw.write(
-                "{},{},{},{}\n".format(
-                    op, mtrs.delta_bigger_than_zero, invalid_cnt, mtrs.total
-                )
-            )
+            fw.write("{},{},{},{}\n".format(op, mtrs.delta_bigger_than_zero, invalid_cnt, mtrs.total))
     with open(mutant_history_path, "w+") as fw:
         for mutant in mutant_history:
             fw.write("{}\n".format(mutant))
@@ -96,33 +90,27 @@ def is_nan_or_inf(t):


 def continue_checker(**run_stat):  # check whether the algorithm meets its exit condition
-    start_time = run_stat["start_time"]
-    time_limitation = run_stat["time_limit"]
-    cur_counters = run_stat["cur_counters"]
-    counters_limit = run_stat["counters_limit"]
-    s_mode = run_stat["stop_mode"]
+    start_time = run_stat['start_time']
+    time_limitation = run_stat['time_limit']
+    cur_counters = run_stat['cur_counters']
+    counters_limit = run_stat['counters_limit']
+    s_mode = run_stat['stop_mode']

     # if timing
     # time limit
-    if s_mode == "TIMING":
-        hours, minutes, seconds = utils.ToolUtils.get_HH_mm_ss(
-            datetime.datetime.now() - start_time
-        )
+    if s_mode == 'TIMING':
+        hours, minutes, seconds = utils.ToolUtils.get_HH_mm_ss(datetime.datetime.now() - start_time)
         total_minutes = hours * 60 + minutes
-        mutate_logger.info(
-            f"INFO: Mutation progress: {total_minutes}/{time_limitation} Minutes!"
-        )
+        mutate_logger.info(f"INFO: Mutation progress: {total_minutes}/{time_limitation} Minutes!")
         if total_minutes < time_limitation:
             return True
         else:
             return False
     # if counters
     # counter limit, size(models) < N
-    elif s_mode == "COUNTER":
+    elif s_mode == 'COUNTER':
         if cur_counters < counters_limit:
-            mutate_logger.info(
-                "INFO: Mutation progress {}/{}".format(cur_counters + 1, counters_limit)
-            )
+            mutate_logger.info("INFO: Mutation progress {}/{}".format(cur_counters + 1, counters_limit))
             return True
         else:
             return False
@@ -142,9 +130,7 @@ def calc_inner_div(model):
     return len(longest_path) / len(graph)


-def _generate_and_predict(
-    res_dict, filename, mutate_num, mutate_ops, test_size, exp, backends
-):
+def _generate_and_predict(res_dict, filename, mutate_num, mutate_ops, test_size, exp, backends):
     # main algorithm function
     """
     Generate models using mutate operators and store them
@@ -153,31 +139,27 @@ def _generate_and_predict(
     mutate_op_invalid_history = {k: 0 for k in mutate_ops}
     mutant_history = []
     # get mutator selection strategy
-    if "svhn" in exp or "fashion2" in exp:
+    if 'svhn' in exp or 'fashion2' in exp:
         origin_model_name = "{}_origin0.hdf5".format(exp)
     else:
         origin_model_name = "{}_origin0.h5".format(exp)
     # the initial seed pool Ms contains only this one model at the beginning
     root_dir = os.path.dirname(os.getcwd())
-
+
     origin_save_path = os.path.join(mut_dir, origin_model_name)
-    mutator_selector_func, mutant_selector_func = get_selector_by_startegy_name(
-        mutator_strategy, mutant_strategy
-    )
+    mutator_selector_func, mutant_selector_func = get_selector_by_startegy_name(mutator_strategy, mutant_strategy)
     # [origin_model_name] means seed pool only contains initial model at beginning.
-    mutator_selector, mutant_selector = mutator_selector_func(
-        mutate_ops
-    ), mutant_selector_func([origin_model_name], capacity=mutate_num + 1)
+    mutator_selector, mutant_selector = mutator_selector_func(mutate_ops), mutant_selector_func([origin_model_name],
+                                                                                                capacity=mutate_num + 1)
     # MCMC,Roulette
     shutil.copy(src=filename, dst=origin_save_path)
-    origin_model_status, res_dict, accumulative_inconsistency, _ = get_model_prediction(
-        res_dict, origin_save_path, origin_model_name, exp, test_size, backends
-    )
+    origin_model_status, res_dict, accumulative_inconsistency, _ = get_model_prediction(res_dict,
+                                                                                        origin_save_path,
+                                                                                        origin_model_name, exp,
+                                                                                        test_size, backends)

     if not origin_model_status:
-        mutate_logger.error(
-            f"Origin model {exp} crashed on some backends! LEMON would skip it"
-        )
+        mutate_logger.error(f"Origin model {exp} crashed on some backends! LEMON would skip it")
         sys.exit(-1)

     last_used_mutator = None
@@ -186,39 +168,27 @@ def _generate_and_predict(

     start_time = datetime.datetime.now()
     order_inconsistency_dict = {}
-    run_stat = {
-        "start_time": start_time,
-        "time_limit": time_limit,
-        "cur_counters": mutant_counter,
-        "counters_limit": mutate_num,
-        "stop_mode": stop_mode,
-    }
+    run_stat = {'start_time': start_time, 'time_limit': time_limit, 'cur_counters': mutant_counter,
+                'counters_limit': mutate_num, 'stop_mode': stop_mode}

     # keep looping as long as the stop condition is not met
     while continue_checker(**run_stat):
         global model_num
         if model_num == mutate_num:
             break
-        picked_seed = utils.ToolUtils.select_mutant(
-            mutant_selector
-        )  # roulette selection of the seed model (pseudocode lines 3-14)
-        selected_op = utils.ToolUtils.select_mutator(
-            mutator_selector, last_used_mutator=last_used_mutator
-        )  # MCMC selection of the mutation operator (pseudocode lines 15-20)
+        picked_seed = utils.ToolUtils.select_mutant(mutant_selector)  # roulette selection of the seed model (pseudocode lines 3-14)
+        selected_op = utils.ToolUtils.select_mutator(mutator_selector,
+                                                     last_used_mutator=last_used_mutator)  # MCMC selection of the mutation operator (pseudocode lines 15-20)
         mutate_op_history[selected_op] += 1
         last_used_mutator = selected_op
         mutator = mutator_selector.mutators[selected_op]  # mutation operator object
         mutant = mutant_selector.mutants[picked_seed]  # seed model object

-        if "svhn" in picked_seed or "fashion2" in picked_seed:
-            new_seed_name = "{}-{}{}.hdf5".format(
-                picked_seed[:-5], selected_op, mutate_op_history[selected_op]
-            )
+        if 'svhn' in picked_seed or 'fashion2' in picked_seed:
+            new_seed_name = "{}-{}{}.hdf5".format(picked_seed[:-5], selected_op, mutate_op_history[selected_op])
         else:
-            new_seed_name = "{}-{}{}.h5".format(
-                picked_seed[:-3], selected_op, mutate_op_history[selected_op]
-            )  # generate the new model
+            new_seed_name = "{}-{}{}.h5".format(picked_seed[:-3], selected_op, mutate_op_history[selected_op])  # generate the new model
         # seed name would not be duplicate
         if new_seed_name not in mutant_selector.mutants.keys():
             # pseudocode line 22: a seed is named after the chosen seed model and mutation operator, so a duplicate name means this model already exists
@@ -226,78 +196,54 @@ def _generate_and_predict(
             picked_seed_path = os.path.join(mut_dir, picked_seed)
             mutate_st = datetime.datetime.now()

-            model_mutation_generators = (
-                root_dir + "/scripts/mutation/model_mutation_generators.py"
-            )
-            mutate_status = os.system(
-                "{}/lemon/bin/python -u {} --model {} "
-                "--mutate_op {} --save_path {} --mutate_ratio {}".format(
-                    python_prefix,
-                    model_mutation_generators,
-                    picked_seed_path,
-                    selected_op,
-                    new_seed_path,
-                    flags.mutate_ratio,
-                )
-            )
+            model_mutation_generators = root_dir + "/scripts/mutation/model_mutation_generators.py"
+            mutate_status = os.system("{}/lemon/bin/python -u {} --model {} "
+                                      "--mutate_op {} --save_path {} --mutate_ratio {}".format(python_prefix,
                                                                                                model_mutation_generators,
                                                                                                picked_seed_path,
                                                                                                selected_op,
                                                                                                new_seed_path,
                                                                                                flags.mutate_ratio))
             # mutate with the selected operator (pseudocode line 21)

             mutate_et = datetime.datetime.now()
             mutate_dt = mutate_et - mutate_st
             h, m, s = utils.ToolUtils.get_HH_mm_ss(mutate_dt)
-            mutate_logger.info(
-                "INFO:Mutate Time Used on {} : {}h, {}m, {}s".format(
-                    selected_op, h, m, s
-                )
-            )
+            mutate_logger.info("INFO:Mutate Time Used on {} : {}h, {}m, {}s".format(selected_op, h, m, s))
             # mutation status code is successful

             if mutate_status == 0:  # mutation finished
                 mutant.selected += 1
                 mutator.total += 1
                 # execute this model on all platforms
-                predict_status, res_dict, accumulative_inconsistency, model_outputs = (
-                    get_model_prediction(
-                        res_dict, new_seed_path, new_seed_name, exp, test_size, backends
-                    )
-                )
+                predict_status, res_dict, accumulative_inconsistency, model_outputs = \
+                    get_model_prediction(res_dict, new_seed_path, new_seed_name, exp, test_size, backends)
                 # compute ACC(m)

                 if predict_status:
                     mutant_history.append(new_seed_name)
                     # pseudocode lines 23-25
-                    print("type:", type(model_outputs))
-                    print("model_outputs:", model_outputs)
+                    print('type:', type(model_outputs))
+                    print('model_outputs:',model_outputs)

-                    if utils.ModelUtils.is_valid_model(
-                        inputs_backends=model_outputs, backends_nums=len(backends)
-                    ):
+                    if utils.ModelUtils.is_valid_model(inputs_backends=model_outputs, backends_nums=len(backends)):

-                        delta = (
-                            accumulative_inconsistency - last_inconsistency
-                        )  # i.e. ACC(m) - ACC(s)
+                        delta = accumulative_inconsistency - last_inconsistency  # i.e. ACC(m) - ACC(s)
                         # the two ifs below seem useless, since the mutator dict only contains MCMC and the mutant dict only contains ROULETTE

-                        if mutator_strategy == "MCMC":
-                            mutator.delta_bigger_than_zero = (
-                                mutator.delta_bigger_than_zero + 1
-                                if delta > 0
-                                else mutator.delta_bigger_than_zero
-                            )
+                        if mutator_strategy == 'MCMC':
+                            mutator.delta_bigger_than_zero = mutator.delta_bigger_than_zero + 1 \
+                                if delta > 0 else mutator.delta_bigger_than_zero

-                        if mutant_strategy == "ROULETTE" and delta > 0:
+                        if mutant_strategy == 'ROULETTE' and delta > 0:
                             # when size >= capacity:
                             # random_mutant & Roulette would drop one and add new one
                             if mutant_selector.is_full():
                                 mutant_selector.pop_one_mutant()
-                            mutant_selector.add_mutant(
-                                new_seed_name
-                            )  # if it amplifies the inconsistency, i.e. ACC(m) >= ACC(s), add it to the seed model pool
+                            mutant_selector.add_mutant(new_seed_name)  # if it amplifies the inconsistency, i.e. ACC(m) >= ACC(s), add it to the seed model pool
                         last_inconsistency = accumulative_inconsistency  # line 29

-                        mutate_logger.info(
-                            "SUCCESS:{} pass testing!".format(new_seed_name)
-                        )
+                        mutate_logger.info("SUCCESS:{} pass testing!".format(new_seed_name))
                         mutant_counter += 1
                     else:
                         mutate_op_invalid_history[selected_op] += 1
@@ -305,11 +251,7 @@ def _generate_and_predict(
                 else:
                     mutate_logger.error("Crashed or NaN model Found!")
             else:
-                mutate_logger.error(
-                    "Exception raised when mutate {} with {}".format(
-                        picked_seed, selected_op
-                    )
-                )
+                mutate_logger.error("Exception raised when mutate {} with {}".format(picked_seed, selected_op))

             mutate_logger.info("Mutated op used history:")
             mutate_logger.info(mutate_op_history)
@@ -317,7 +259,7 @@ def _generate_and_predict(
             mutate_logger.info("Invalid mutant generated history:")
             mutate_logger.info(mutate_op_invalid_history)

-        run_stat["cur_counters"] = mutant_counter
+        run_stat['cur_counters'] = mutant_counter

     save_mutate_history(mutator_selector, mutate_op_invalid_history, mutant_history)
@@ -359,16 +301,12 @@ def generate_metrics_result(res_dict, predict_output, model_idntfr): # 计算AC
         for metrics_name, metrics_result_dict in res_dict.items():
             metrics_func = utils.MetricsUtils.get_metrics_by_name(metrics_name)  # compute
             # metrics_results in list type
-            metrics_results = metrics_func(
-                prediction1, prediction2, y_test[: flags.test_size]
-            )
+            metrics_results = metrics_func(prediction1, prediction2, y_test[:flags.test_size])
             # there are test_size inputs in total, so metrics_results is a list of prediction results of length test_size
             # ACC -> float: The sum of all inputs under all backends
             accumulative_incons += sum(metrics_results)  # ACC = ∑
             for input_idx, delta in enumerate(metrics_results):
-                delta_key = "{}_{}_{}_input{}".format(
-                    model_idntfr, bk_name1, bk_name2, input_idx
-                )
+                delta_key = "{}_{}_{}_input{}".format(model_idntfr, bk_name1, bk_name2, input_idx)
                 metrics_result_dict[delta_key] = delta

     mutate_logger.info(f"Accumulative Inconsistency: {accumulative_incons}")
@@ -387,9 +325,7 @@ def generate_theta(predict_output, backends):
     theta_res = {bk: 0 for bk in backends}
     for pair in predict_output.items():
         bk_name, prediction = pair
-        theta_res[bk_name] = utils.MetricsUtils.get_theta_mean(
-            prediction, y_test[: flags.test_size]
-        )
+        theta_res[bk_name] = utils.MetricsUtils.get_theta_mean(prediction, y_test[:flags.test_size])
     return theta_res
@@ -404,10 +340,8 @@ def get_model_prediction(res_dict, model_path, model_name, exp, test_size, backe
     """

     root_dir = model_path.split("origin_model")[0]
-
-    npy_path = (
-        root_dir + "res.npy"
-    )  # path for saving the model predictions; line 44 of patch_prediction_extractor.py must be changed to the same path
+
+    npy_path = root_dir + 'res.npy'  # path for saving the model predictions; line 44 of patch_prediction_extractor.py must be changed to the same path

     predict_output = {b: [] for b in backends}
     model_idntfr = model_name[:-3]
@@ -416,18 +350,14 @@ def get_model_prediction(res_dict, model_path, model_name, exp, test_size, backe
         python_bin = f"{python_prefix}/{bk}/bin/python"
         predict_st = datetime.datetime.now()
         # predict using the different libraries
-        pre_status_bk = os.system(
-            f"{python_bin} -u -m patch_prediction_extractor --backend {bk} "
-            f"--exp {exp} --test_size {test_size} --model {model_path} "
-            f"--config_name {flags.config_name}"
-        )
+        pre_status_bk = os.system(f"{python_bin} -u -m patch_prediction_extractor --backend {bk} "
+                                  f"--exp {exp} --test_size {test_size} --model {model_path} "
+                                  f"--config_name {flags.config_name}")

         predict_et = datetime.datetime.now()
         predict_td = predict_et - predict_st
         h, m, s = utils.ToolUtils.get_HH_mm_ss(predict_td)
-        mutate_logger.info(
-            "Prediction Time Used on {} : {}h, {}m, {}s".format(bk, h, m, s)
-        )
+        mutate_logger.info("Prediction Time Used on {} : {}h, {}m, {}s".format(bk, h, m, s))

         # If no exception is thrown,save prediction result
         if pre_status_bk == 0:  # prediction succeeded, save the result
@@ -438,21 +368,16 @@ def get_model_prediction(res_dict, model_path, model_name, exp, test_size, backe
         # record the crashed backend
         else:
             all_backends_predict_status = False
-            mutate_logger.error(
-                "{} crash on backend {} when predicting ".format(model_name, bk)
-            )
+            mutate_logger.error("{} crash on backend {} when predicting ".format(model_name, bk))

     status = False
     accumulative_incons = None

     # run ok on all platforms
-    if (
-        all_backends_predict_status
-    ):  # all libraries ran successfully and saved their results; check whether the results contain errors
+    if all_backends_predict_status:  # all libraries ran successfully and saved their results; check whether the results contain errors
         predictions = list(predict_output.values())
-        res_dict, accumulative_incons = generate_metrics_result(
-            res_dict=res_dict, predict_output=predict_output, model_idntfr=model_idntfr
-        )
+        res_dict, accumulative_incons = generate_metrics_result(res_dict=res_dict, predict_output=predict_output,
+                                                                model_idntfr=model_idntfr)
         # compute ACC (used to measure the inconsistency of the prediction results)
         # gini_res = generate_gini_result(predict_output=predict_output, backends=backends)
         # theta = generate_theta(predict_output=predict_output, backends=backends)
@@ -477,21 +402,16 @@ def get_model_prediction(res_dict, model_path, model_name, exp, test_size, backe

             # has NaN on all backends --> not a NaN bug
             else:
-                nan_model_path = os.path.join(
-                    nan_dir, f"{model_idntfr}_NaN_on_all_backends.h5"
-                )
-                mutate_logger.error(
-                    "Error: Found one NaN Model on all libraries. move NAN model"
-                )
+                nan_model_path = os.path.join(nan_dir, f"{model_idntfr}_NaN_on_all_backends.h5")
+                mutate_logger.error("Error: Found one NaN Model on all libraries. move NAN model")
                 shutil.move(model_path, nan_model_path)

         else:  # No NaN or INF on any backend
-            print(model_path)
+            # print(model_path)
             for bk in backends:
                 python_bin = f"{python_prefix}/{bk}/bin/python"
-                os.system(
-                    f"{python_bin} -u -m model_to_txt --backend {bk} --model_path {model_path} --root_dir {root_dir}"
-                )
+                os.system(f"{python_bin} -u -m model_to_txt --backend {bk} --model_path {model_path} --root_dir {root_dir}")
+                os.system(f"{python_bin} -u -m draw_result --backend {bk} --model_path {model_path}")
             # if 'svhn' in model_name or 'fashion2' in model_name:
             # file_path = os.path.join(folder_path, model_path.split("\\")[-1][:-5] + '.json')
             # else:
@@ -505,9 +425,7 @@ def get_model_prediction(res_dict, model_path, model_name, exp, test_size, backe
         # tar_set = set(sub_info['layer_type'])

         mutate_logger.info("Saving prediction")
-        with open(
-            "{}/prediction_{}.pkl".format(inner_output_dir, model_idntfr), "wb+"
-        ) as f:
+        with open("{}/prediction_{}.pkl".format(inner_output_dir, model_idntfr), "wb+") as f:
             pickle.dump(predict_output, file=f)
         status = True
@@ -529,44 +447,15 @@ if __name__ == "__main__":
     It could make mutate_lemon.py run independently without relying on mutation_executor.py
     """
     parse = argparse.ArgumentParser()
-    parse.add_argument(
-        "--is_mutate",
-        type=ast.literal_eval,
-        default=False,
-        help="parameter to determine mutation option",
-    )
-    parse.add_argument(
-        "--mutate_op",
-        type=str,
-        nargs="+",
-        choices=[
-            "WS",
-            "GF",
-            "NEB",
-            "NAI",
-            "NS",
-            "ARem",
-            "ARep",
-            "LA",
-            "LC",
-            "LR",
-            "LS",
-            "MLA",
-        ],
-        help="parameter to determine mutation option",
-    )
-    parse.add_argument(
-        "--model", type=str, help="relative path of model file(from root dir)"
-    )
-    parse.add_argument(
-        "--output_dir", type=str, help="relative path of output dir(from root dir)"
-    )
-    parse.add_argument("--backends", type=str, nargs="+", help="list of backends")
-    parse.add_argument(
-        "--mutate_num",
-        type=int,
-        help="number of variant models generated by each mutation operator",
-    )
+    parse.add_argument("--is_mutate", type=ast.literal_eval, default=False,
+                       help="parameter to determine mutation option")
+    parse.add_argument("--mutate_op", type=str, nargs='+',
+                       choices=['WS', 'GF', 'NEB', 'NAI', 'NS', 'ARem', 'ARep', 'LA', 'LC', 'LR', 'LS', 'MLA']
+                       , help="parameter to determine mutation option")
+    parse.add_argument("--model", type=str, help="relative path of model file(from root dir)")
+    parse.add_argument("--output_dir", type=str, help="relative path of output dir(from root dir)")
+    parse.add_argument("--backends", type=str, nargs='+', help="list of backends")
+    parse.add_argument("--mutate_num", type=int, help="number of variant models generated by each mutation operator")
     parse.add_argument("--mutate_ratio", type=float, help="ratio of mutation")
     parse.add_argument("--exp", type=str, help="experiments identifiers")
     parse.add_argument("--test_size", type=int, help="amount of testing image")
@@ -576,13 +465,13 @@ if __name__ == "__main__":
     lemon_cfg = configparser.ConfigParser()
     # lemon_cfg.read(f".\config\{flags.config_name}")
     cfg_path = os.path.join(os.path.dirname(os.getcwd()), "config", flags.config_name)
-    lemon_cfg.read(cfg_path)
+    lemon_cfg.read(cfg_path)
     # lemon_cfg.read(f"config/demo.conf")
-    time_limit = lemon_cfg["parameters"].getint("time_limit")
-    mutator_strategy = lemon_cfg["parameters"].get("mutator_strategy").upper()
-    mutant_strategy = lemon_cfg["parameters"].get("mutant_strategy").upper()
-    stop_mode = lemon_cfg["parameters"].get("stop_mode").upper()
-    alpha = lemon_cfg["parameters"].getfloat("alpha")
+    time_limit = lemon_cfg['parameters'].getint("time_limit")
+    mutator_strategy = lemon_cfg['parameters'].get("mutator_strategy").upper()
+    mutant_strategy = lemon_cfg['parameters'].get("mutant_strategy").upper()
+    stop_mode = lemon_cfg['parameters'].get("stop_mode").upper()
+    alpha = lemon_cfg['parameters'].getfloat("alpha")

     mutate_logger = Logger()
     # pool = redis.ConnectionPool(host=lemon_cfg['redis']['host'], port=lemon_cfg['redis']['port'],
@@ -602,28 +491,17 @@ if __name__ == "__main__":
     metrics_result_dir = os.path.join(experiment_dir, "metrics_result")

     x, y = utils.DataUtils.get_data_by_exp(flags.exp)  # read the data from the conf file and convert its format
-    x_test, y_test = x[: flags.test_size], y[: flags.test_size]
-    pool_size = lemon_cfg["parameters"].getint("pool_size")
-    python_prefix = lemon_cfg["parameters"]["python_prefix"].rstrip("\\")
+    x_test, y_test = x[:flags.test_size], y[:flags.test_size]
+    pool_size = lemon_cfg['parameters'].getint('pool_size')
+    python_prefix = lemon_cfg['parameters']['python_prefix'].rstrip("\\")
     try:  # run the algorithm
-        metrics_list = lemon_cfg["parameters"]["metrics"].split(" ")  # D_MAD
+        metrics_list = lemon_cfg['parameters']['metrics'].split(" ")  # D_MAD
         lemon_results = {k: dict() for k in metrics_list}
-        lemon_results = _generate_and_predict(
-            lemon_results,
-            flags.model,
-            flags.mutate_num,
-            flags.mutate_op,
-            flags.test_size,
-            flags.exp,
-            flags.backends,
-        )
-        with open(
-            "{}/{}_lemon_results.pkl".format(experiment_dir, flags.exp), "wb+"
-        ) as f:
+        lemon_results = _generate_and_predict(lemon_results, flags.model, flags.mutate_num, flags.mutate_op,
+                                              flags.test_size, flags.exp, flags.backends)
+        with open("{}/{}_lemon_results.pkl".format(experiment_dir, flags.exp), "wb+") as f:
             pickle.dump(lemon_results, file=f)
-        utils.MetricsUtils.generate_result_by_metrics(
-            metrics_list, lemon_results, metrics_result_dir, flags.exp
-        )
+        utils.MetricsUtils.generate_result_by_metrics(metrics_list, lemon_results, metrics_result_dir, flags.exp)

     except Exception as e:
         mutate_logger.exception(sys.exc_info())
@@ -635,6 +513,6 @@ if __name__ == "__main__":
         endtime = datetime.datetime.now()
         time_delta = endtime - starttime
         h, m, s = utils.ToolUtils.get_HH_mm_ss(time_delta)
-        mutate_logger.info(
-            "Mutation process is done: Time used: {} hour,{} min,{} sec".format(h, m, s)
-        )
+        mutate_logger.info("Mutation process is done: Time used: {} hour,{} min,{} sec".format(h, m, s))
+
+