# usage: python ga_error_test.py <model_name 1-4> <model_existed 0|1> <seed_name> <is_new_seed,is_err_collection,err_type>
# (adapted from driving_models.py; the original usage line read "python driving_models.py 1 0 - train the dave-orig model")
from __future__ import print_function
import csv
import math  # needed for math.ceil in error_test
import shutil
import sys
import os
import time
sys.path.append("..")
from data_utils import *
import pandas as pd
# add tensorflow.
from tensorflow.keras.layers import Convolution2D, Input, Dense, Flatten, Lambda, MaxPooling2D, Dropout, Activation, \
    SpatialDropout2D
# remove merge
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras import models, optimizers, backend
# Model, K and path are referenced below but were not imported explicitly in
# the original file (data_utils may re-export some of them); import them here
# so the module stands alone.
from tensorflow.keras.models import Model
from tensorflow.keras import backend as K
from os import path
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
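

# Model 1: the original DAVE architecture (five convolutional layers followed
# by a 1164-100-50-10 fully connected stack, with an atan steering output).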
def Dave_orig(input_tensor=None, load_weights=False):  # original dave
    if input_tensor is None:
        input_tensor = Input(shape=(100, 100, 3))
    x = Convolution2D(24, (5, 5), padding='valid', activation='relu', strides=(2, 2), name='block1_conv1')(input_tensor)
    x = Convolution2D(36, (5, 5), padding='valid', activation='relu', strides=(2, 2), name='block1_conv2')(x)
    x = Convolution2D(48, (5, 5), padding='valid', activation='relu', strides=(2, 2), name='block1_conv3')(x)
    x = Convolution2D(64, (3, 3), padding='valid', activation='relu', strides=(1, 1), name='block1_conv4')(x)
    x = Convolution2D(64, (3, 3), padding='valid', activation='relu', strides=(1, 1), name='block1_conv5')(x)
    x = Flatten(name='flatten')(x)
    x = Dense(1164, activation='relu', name='fc1')(x)
    x = Dense(100, activation='relu', name='fc2')(x)
    x = Dense(50, activation='relu', name='fc3')(x)
    x = Dense(10, activation='relu', name='fc4')(x)
    x = Dense(1, name='before_prediction')(x)
    x = Lambda(atan_layer, output_shape=atan_layer_shape, name='prediction')(x)
    m = Model(input_tensor, x)
    if load_weights:
        m.load_weights('../trained_models/Model1.h5')
    # compiling
    m.compile(loss='mse', optimizer='Adam')
    # m.compile(loss=[rmse], optimizer='adadelta')
    return m
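

# Model 2: the same DAVE architecture with normally initialized fully
# connected layers (normal_init is provided by data_utils).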
def Dave_norminit(input_tensor=None, load_weights=False):  # original dave with normal initialization
    if input_tensor is None:
        input_tensor = Input(shape=(100, 100, 3))
    x = Convolution2D(24, (5, 5), padding='valid', activation='relu', strides=(2, 2),
                      name='block1_conv1')(input_tensor)
    x = Convolution2D(36, (5, 5), padding='valid', activation='relu', strides=(2, 2),
                      name='block1_conv2')(x)
    x = Convolution2D(48, (5, 5), padding='valid', activation='relu', strides=(2, 2),
                      name='block1_conv3')(x)
    x = Convolution2D(64, (3, 3), padding='valid', activation='relu', strides=(1, 1),
                      name='block1_conv4')(x)
    x = Convolution2D(64, (3, 3), padding='valid', activation='relu', strides=(1, 1),
                      name='block1_conv5')(x)
    x = Flatten(name='flatten')(x)
    x = Dense(1164, kernel_initializer=normal_init, activation='relu', name='fc1')(x)
    x = Dense(100, kernel_initializer=normal_init, activation='relu', name='fc2')(x)
    x = Dense(50, kernel_initializer=normal_init, activation='relu', name='fc3')(x)
    x = Dense(10, kernel_initializer=normal_init, activation='relu', name='fc4')(x)
    x = Dense(1, name='before_prediction')(x)
    x = Lambda(atan_layer, output_shape=atan_layer_shape, name='prediction')(x)
    m = Model(input_tensor, x)
    if load_weights:
        m.load_weights('../trained_models/Model2.h5')
    # compiling
    m.compile(loss='mse', optimizer='Adam')
    # m.compile(loss=[rmse], optimizer='adadelta')
    return m
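

# Model 3: a simplified DAVE variant that adds max pooling after each
# convolution and dropout on the fully connected layers.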
def Dave_dropout(input_tensor=None, load_weights=False):  # simplified dave
    if input_tensor is None:
        input_tensor = Input(shape=(100, 100, 3))
    x = Convolution2D(16, (3, 3), padding='valid', activation='relu', name='block1_conv1')(input_tensor)
    x = MaxPooling2D(pool_size=(2, 2), name='block1_pool1')(x)
    x = Convolution2D(32, (3, 3), padding='valid', activation='relu', name='block1_conv2')(x)
    x = MaxPooling2D(pool_size=(2, 2), name='block1_pool2')(x)
    x = Convolution2D(64, (3, 3), padding='valid', activation='relu', name='block1_conv3')(x)
    x = MaxPooling2D(pool_size=(2, 2), name='block1_pool3')(x)
    x = Flatten(name='flatten')(x)
    x = Dense(500, activation='relu', name='fc1')(x)
    x = Dropout(.5)(x)
    x = Dense(100, activation='relu', name='fc2')(x)
    x = Dropout(.25)(x)
    x = Dense(20, activation='relu', name='fc3')(x)
    x = Dense(1, name='before_prediction')(x)
    x = Lambda(atan_layer, output_shape=atan_layer_shape, name="prediction")(x)
    m = Model(input_tensor, x)
    if load_weights:
        m.load_weights('../trained_models/Model3.h5')
    # compiling (learning_rate replaces the deprecated lr argument)
    m.compile(loss='mse', optimizer=optimizers.Adam(learning_rate=1e-04))
    # m.compile(loss=[rmse], optimizer='adadelta')
    return m
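

# Model 4: the Udacity "Epoch" model; it takes 128x128 input and predicts the
# steering value directly, without the atan output layer used by the DAVE models.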
def Epoch_model(input_tensor=None, load_weights=False):
    if input_tensor is None:
        input_tensor = Input(shape=(128, 128, 3))
    x = Convolution2D(32, (3, 3), activation='relu', padding='same')(input_tensor)
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)
    x = Dropout(0.25)(x)
    x = Convolution2D(64, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)
    x = Dropout(0.25)(x)
    x = Convolution2D(128, (3, 3), activation='relu', padding='same')(x)
    x = MaxPooling2D((2, 2), strides=(2, 2))(x)
    x = Dropout(0.5)(x)
    y = Flatten()(x)
    y = Dense(1024, activation='relu')(y)
    y = Dropout(.5)(y)
    y = Dense(1)(y)
    m = Model(input_tensor, y)
    if load_weights:
        m.load_weights('../trained_models/Model4.h5')
    # compiling (learning_rate replaces the deprecated lr argument)
    m.compile(loss='mse', optimizer=optimizers.Adam(learning_rate=1e-04))
    # m.compile(loss=[rmse], optimizer='adadelta')
    return m
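

# Loss helpers: rmse is a tensor-valued Keras metric, while calc_rmse and
# calc_mse compute plain Python floats from prediction/label sequences.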
def rmse(y_true, y_pred):  # used for loss metric, output tensor
    '''Calculates RMSE
    '''
    return K.sqrt(K.mean(K.square(y_pred - y_true)))


def calc_rmse(yhat, label):  # used for loss cal, output float
    mse = 0.
    count = 0
    if len(yhat) != len(label):
        print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", "yhat and label have different lengths")
        return -1
    for i in range(len(yhat)):
        count += 1
        predicted_steering = yhat[i]
        steering = label[i]
        # print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", predicted_steering)
        # print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", steering)
        mse += (float(steering) - float(predicted_steering)) ** 2.
    return (mse / count) ** 0.5


def calc_mse(yhat, label):  # used for loss cal, output float
    mse = 0.
    count = 0
    if len(yhat) != len(label):
        print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", "yhat and label have different lengths")
        return -1
    for i in range(len(yhat)):
        count += 1
        predicted_steering = yhat[i]
        steering = label[i]
        # print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", predicted_steering)
        # print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", steering)
        mse += (float(steering) - float(predicted_steering)) ** 2.
    return (mse / count)
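

# error_test evaluates the chosen model on the CARLA test set and counts
# violations. Command-line arguments:
#   sys.argv[1] - model name, one of '1'..'4'
#   sys.argv[2] - whether trained weights exist ('0' builds a fresh model, anything else loads weights)
#   sys.argv[3] - seed name; underscore-separated fields carry the iterate count and seed number
#   sys.argv[4] - comma-separated [is_new_seed, is_err_collection, err_type]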
def error_test():
    # Train variables define & Input variables parse
    # TODO
    batch_size = 64
    nb_epoch = 30
    image_shape = (100, 100)
    model_name = sys.argv[1]
    model_existed = sys.argv[2]
    seed_name = sys.argv[3]
    seed_str = seed_name.split("_")[3]
    seed_number = int(seed_str[0])
    # data_collection_para: [is_new_seed (for entropy), is_err_collection (collect err or not),
    #                        err_type (collect normal(1) / sampling(2) / random(3) data)]
    data_collection_para = sys.argv[4].strip().split(',')
    is_err_collection = data_collection_para[1]
    err_type = data_collection_para[2]
    print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", seed_number, seed_number, seed_number)
    dataset_path = '../train_carla/'
    test_dataset_path = '../scenario_runner-0.9.13/_out/'
    with open(test_dataset_path + 'label_test.csv', 'r') as f:
        rows = len(f.readlines()) - 1
    if rows == 0:
        # nothing to evaluate; return the same shape the caller unpacks
        return 0, 0, []
    # --------------------------------------Build Model---------------------------------------- #
    # Dave_v1
    if model_name == '1':
        if model_existed == '0':
            model = Dave_orig()
        else:
            model = Dave_orig(None, True)
        save_model_name = '../trained_models/Model1.h5'
    # Dave_v2
    elif model_name == '2':
        # K.set_learning_phase(1)
        if model_existed == '0':
            model = Dave_norminit()
        else:
            model = Dave_norminit(None, True)
        save_model_name = '../trained_models/Model2.h5'
        # batch_size = 64  # 1 2 3 4 5 6x
        nb_epoch = 30
    # Dave_v3
    elif model_name == '3':
        # K.set_learning_phase(1)
        if model_existed == '0':
            model = Dave_dropout()
        else:
            model = Dave_dropout(None, True)
        save_model_name = '../trained_models/Model3.h5'
        # nb_epoch = 30
    # Udacity Epoch Model
    elif model_name == '4':
        if model_existed == '0':
            model = Epoch_model()
        else:
            model = Epoch_model(None, True)
        save_model_name = '../trained_models/Model4.h5'
        image_shape = (128, 128)
        nb_epoch = 30
        batch_size = 32
    else:
        print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", bcolors.FAIL + 'invalid model name, must in [1, 2, 3, 4]' + bcolors.ENDC)
        sys.exit(1)  # model would be undefined below, so fail fast
    print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", bcolors.OKGREEN + 'model %s built' % model_name + bcolors.ENDC)
    # --------------------------------------Evaluation---------------------------------------- #
    # Different evaluation methods for different models
    if model_name != '4':
        K.set_learning_phase(0)
        test_generator, samples_per_epoch = load_carla_test_data(path=test_dataset_path, batch_size=batch_size,
                                                                 shape=image_shape)
        print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", 'test samples: ', samples_per_epoch)
        loss = model.evaluate(test_generator, steps=math.ceil(samples_per_epoch * 1. / batch_size), verbose=1)
        print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", "model %s evaluate_generator loss: %.8f" % (model_name, loss))
        # --------------------------------------Predict Dave---------------------------------------- #
        filelist = []
        true_angle_list = []
        with open(test_dataset_path + 'label_test.csv', 'r') as f:
            rows = len(f.readlines()) - 1
            f.seek(0)
            for i, line in enumerate(f):
                if i == 0:
                    continue
                file_name = line.split(',')[0]
                # TODO BEGIN
                # if i > int(rows * 0.75):
                filelist.append(test_dataset_path + 'center/' + file_name)
                true_angle_list.append(float(line.split(',')[2]))
                # TODO END
        # print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", 'filelist length: ', len(filelist))
        # print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", 'true_angle_list', true_angle_list)
        print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", "--------IMG READ-------")
        predict_angle_list = []
        imgs = []
        raw_imgs = []
        count = 0
        ori_image_size = (720, 1280)
        for f in filelist:
            count += 1
            if count % 100 == 0:
                print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", str(count) + ' images read')
            orig_name = f
            gen_img = preprocess_image(orig_name, image_shape)
            raw_img = preprocess_image(orig_name, ori_image_size)
            imgs.append(gen_img)
            raw_imgs.append(raw_img)
        print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", "--------IMG READ COMPLETE-------")
        print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", "--------DAVE PREDICT-------")
        count = 0
        imgs = np.array(imgs)
        for i in range(len(imgs)):
            predict_angle_list.append(model.predict(imgs[i])[0])
            # TODO: Add arrows to raw images and save
            # gen_img_deprocessed = draw_arrow3(deprocess_image(raw_imgs[i], (720, 1280, 3)), -true_angle_list[i], -predict_angle_list[-1])
            # imsave('./test_output_carla/' + str(i) + 'th_img.png', gen_img_deprocessed)
            # count += 1
            # if count % 20 == 0:
            #     print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", str(count) + ' images saved')
        print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", "--------DAVE PREDICT COMPLETE-------")
        yhat = predict_angle_list
        test_y = true_angle_list
    else:
        # label data read
        test_steering_log = path.join(test_dataset_path, 'label_test.csv')
        test_data = carla_load_steering_data(test_steering_log)
        test_frame_id = carla_load_frame_id(test_data)
        print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", 'testset frame_id len: ', len(test_frame_id))
        # dataset divide
        time_list_test = []
        for j in range(0, len(test_frame_id)):
            time_list_test.append(test_frame_id[j])
        print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", 'time_list_test len: ', len(time_list_test))
        test_generator = carla_data_generator(frame_id=test_frame_id,
                                              steering_log=test_steering_log,
                                              image_folder=test_dataset_path,
                                              unique_list=time_list_test,
                                              gen_type='test',
                                              batch_size=len(time_list_test),
                                              image_size=image_shape,
                                              shuffle=False,
                                              preprocess_input=normalize_input,
                                              preprocess_output=exact_output)
        # --------------------------------------Predict Epoch---------------------------------------- #
        print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", "--------EPOCH PREDICT-------")
        test_x, test_y = next(test_generator)
        yhat = model.predict(test_x)
        print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", "--------EPOCH PREDICT COMPLETE-------")
        print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", yhat)
    loss = calc_mse(yhat, test_y)
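
    # The FIND ERROR section below flags frame m as a violation when the
    # squared difference between the new prediction yhat[m] and the model's
    # original prediction exceeds lamb * oriMSE, both read per frame from
    # model<name>_oriMSE.csv.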
    # --------------------------------------FIND ERROR---------------------------------------- #
    filelist_list = []
    list_row = []
    with open(test_dataset_path + 'label_test.csv', 'r') as f:
        rows = len(f.readlines()) - 1
        f.seek(0)
        for i, line in enumerate(f):
            if i == 0:
                continue
            file_name = line.split(',')[0]
            filelist_list.append(file_name)
    df = pd.read_csv(test_dataset_path + 'label_test.csv')
    df.head(2)
    df = df.drop(df.index[0:250])
    df.to_csv(test_dataset_path + 'label_test.csv', index=False, sep=',', encoding="utf-8")
    # path = '../scenario_runner-0.9.13/_out/'
    # shutil.rmtree(path + 'center')  # clear the _out directory
    # os.mkdir(path + 'center')
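    # diversity.txt holds a per-seed, per-frame 0/1 matrix marking which frames
    # have already produced an error, so repeated errors on the same frame do
    # not inflate the diversity count (divadd).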
    a = np.loadtxt("diversity.txt")
    iterate = int(seed_name.split("_")[1])
    lamb = 1
    countcc = 0
    divadd = 0
    error_list = []
    lenm = len(filelist_list)
    # for i in range(6):
    #     for j in range(125):
    #         if a[i, j] == 1:
    #             divadd = divadd + 1
    with open(test_dataset_path + 'model' + model_name + '_oriMSE.csv', 'r') as f:
        rows = len(f.readlines()) - 1
        f.seek(0)
        m = 0
        num_of_samples = 0
        for i, line in enumerate(f):
            if i == 0:
                continue
            # only look at the 125-frame window belonging to this seed
            if (int(seed_number) - 1) * 125 < i <= (int(seed_number) * 125):
                num_of_samples += 1
                predict_steering_angle = line.split(',')[1]
                oriMSE = line.split(',')[2]
                true_angle_gt = line.split(',')[3]
                if ((float(yhat[m]) - float(predict_steering_angle)) ** 2) > (lamb * float(oriMSE)):
                    countcc = countcc + 1
                    list_row.append(
                        [filelist_list[m], predict_steering_angle, float(yhat[m]), true_angle_gt, model_name,
                         seed_number, m])
                    # print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", predict_steering_angle, float(yhat[m]), oriMSE)
                    if a[seed_number - 1, m] == 0 and iterate != 0:
                        a[seed_number - 1, m] = 1
                        divadd = divadd + 1
                    error_list.append(m)
                else:
                    os.remove(test_dataset_path + 'center/' + filelist_list[m])
                if (m + 1) < lenm:
                    m = m + 1
                else:
                    break
    with open('/home/vangogh/software/FuzzScene/code/GA/sample_num.csv', 'a+', encoding='utf-8') as f:
        csv_writer = csv.writer(f)
        timestr = time.strftime("%Y%m%d-%H%M%S")
        csv_writer.writerow([timestr, model_name, seed_number, num_of_samples])
    print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", countcc)
    np.savetxt("diversity.txt", a)
    print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", is_err_collection)
    if is_err_collection == '1':  # Collect Data for RQ2 RQ3
        if err_type == '1' or err_type == '2':  # normal data
            file_path_img = '../Violated images/err_images/model_' + str(model_name) + '/'
            if err_type == '2':  # sampling data
                file_path_sam = '../Violated images/sampling/model_' + str(model_name) + '/error.csv'
                with open(file_path_sam, 'a+', encoding='utf-8') as f:
                    cw = csv.writer(f)
                    for line in range(len(list_row)):
                        cw.writerow(list_row[line])
        elif err_type == '3':  # random data
            file_path_img = '../Violated images/random/'
            file_path_error = file_path_img + 'error.csv'
            for img in list_row:
                shutil.move(test_dataset_path + 'center/' + img[0], file_path_img)
            with open(file_path_error, 'a+', encoding='utf-8') as f:
                csv_writer = csv.writer(f)
                for line in range(len(list_row)):
                    csv_writer.writerow(list_row[line])
    # --------------------------------------Get_OriMSE---------------------------------------- #
    # filelist = []
    # with open(test_dataset_path + 'label_test.csv', 'r') as f:
    #     rows = len(f.readlines()) - 1
    #     f.seek(0)
    #     for i, line in enumerate(f):
    #         if i == 0:
    #             continue
    #         file_name = line.split(',')[0]
    #         filelist.append(file_name)
    # file_path = '/home/software/FuzzScene/code/scenario_runner-0.9.13/_out/model4_oriMSE.csv'
    # with open(file_path, 'a+', encoding='utf-8') as f:
    #     for m in range(len(yhat)):
    #         list_1 = [filelist[m], float(yhat[m]), loss]
    #         csv_writer = csv.writer(f)
    #         csv_writer.writerow(list_1)
    # --------------------------------------Visualize results---------------------------------------- #
    # loss = calc_mse(yhat, test_y)
    # print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", 'model %s MSE loss on test_set: %.8f' % (model_name, loss))
    # print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", "--------PLOT RESULT-------")
    # plt.figure(figsize=(32, 8))
    # plt.plot(test_y, 'r.-', label='target')
    # plt.plot(yhat, 'b.-', label='predict')
    # plt.legend(loc='best')
    # plt.title("loss(%s) %.8f Evaluated on %d images" % (model.loss, loss, len(test_y)))
    # model_fullname = "%s_%d.png" % (model_name, int(time.time()))
    # plt.savefig('./trained_models/{}_{}_loss {}_{} epochs_{} batch_size.png'.format(model_name, int(time.time()), model.loss, nb_epoch, batch_size))
    # plt.show()
    # print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", "--------PLOT RESULT COMPLETE-------")
    print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", bcolors.OKGREEN + 'Model evaluated' + bcolors.ENDC)
    # persist the error indices; np.savetxt overwrites list.txt on every run,
    # which also made the original manual line-by-line write to the same file
    # redundant
    np.savetxt("list.txt", error_list)
    return countcc, divadd, error_list


if __name__ == '__main__':
    a = error_test()
    print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", type(a))
    print("[" + os.path.basename(__file__) + ", Line " + str(sys._getframe().f_lineno) + ", " + sys._getframe().f_code.co_name + "] ", a)
    pricount, div, error_list = a
    error_count = './error_count.csv'
    with open(error_count, 'a+', encoding='utf-8') as f:
        csv_writer = csv.writer(f)
        csv_writer.writerow([sys.argv[3], pricount, div, error_list])
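
# Hypothetical example invocation (the seed-name fields follow the split("_")
# parsing in error_test: field 1 is the iterate count, field 3 begins with the
# seed number):
#   python ga_error_test.py 4 1 seed_2_pop_3 0,1,2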