sampling_predict.py

import sys, os, random
import time, math, csv
import multiprocessing
import numpy as np

sys.path.append("..")
from configs import bcolors
from utils import *


def preprocess(path, target_size):
    # preprocess_image (from utils) returns a batch of one image; take that single image
    return preprocess_image(path, target_size)[0]


def data_generator(xs, ys, target_size, batch_size=64):
    # Infinite generator yielding (images, labels) batches for Keras evaluate/predict.
    gen_state = 0
    while 1:
        if gen_state + batch_size > len(xs):
            # last (possibly short) batch, then wrap around
            paths = xs[gen_state: len(xs)]
            y = ys[gen_state: len(xs)]
            X = [preprocess(x, target_size) for x in paths]
            gen_state = 0
        else:
            paths = xs[gen_state: gen_state + batch_size]
            y = ys[gen_state: gen_state + batch_size]
            X = [preprocess(x, target_size) for x in paths]
            gen_state += batch_size
        yield np.array(X), np.array(y)


def load_carla_test_data(path='', batch_size=32, shape=(100, 100)):
    xs = []
    ys = []
    start_load_time = time.time()
    with open(path + 'label_test.csv', 'r') as f:
        rows = len(f.readlines()) - 1
        f.seek(0)
        for i, line in enumerate(f):
            if i == 0:
                continue
            xs.append(path + 'center/' + line.split(',')[0])
            ys.append(1)

    # shuffle list of images
    c = list(zip(xs, ys))
    random.shuffle(c)
    xs, ys = zip(*c)

    train_xs = xs
    train_ys = ys

    train_generator = data_generator(train_xs, train_ys,
                                     target_size=shape,
                                     batch_size=batch_size)
    print(bcolors.OKBLUE + 'finished loading data, running time: {} seconds'.format(
        time.time() - start_load_time) + bcolors.ENDC)
    return train_generator, len(train_xs)


def predict(model_name, seed_number, q):
    import tensorflow as tf
    from tensorflow.keras import backend as K

    os.environ["CUDA_VISIBLE_DEVICES"] = '0'  # make only GPU 0 visible
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.per_process_gpu_memory_fraction = 0.6  # use at most 60% of the GPU memory
    config.gpu_options.allow_growth = True  # allocate GPU memory on demand
    sess = tf.compat.v1.Session(config=config)

    batch_size = 8
    image_shape = (100, 100)
    test_dataset_path = '../scenario_runner-0.9.13/_out/'

    model = tf.keras.applications.VGG16(
        include_top=False,
        weights='imagenet',
        input_tensor=None,
        input_shape=(100, 100, 3),
        pooling=None,
        classes=1000,
        classifier_activation="softmax",
    )
    for layer in model.layers:
        layer.trainable = False
    model.summary()

    x = tf.keras.layers.Flatten()(model.output)  # flatten the convolutional features
    x = tf.keras.layers.Dense(4096, activation='relu')(x)  # fully connected layer
    x = tf.keras.layers.Dropout(0.5)(x)
    x = tf.keras.layers.Dense(4096, activation='relu')(x)
    x = tf.keras.layers.Dropout(0.5)(x)
    predictions = tf.keras.layers.Dense(2, activation='softmax')(x)  # softmax over the 2 classes
    head_model = tf.keras.Model(inputs=model.input, outputs=predictions)

    s = '../sampling_vgg/vgg_' + model_name + '_' + seed_number + '.h5'
    head_model.load_weights(s)
    head_model.compile(optimizer='adam',
                       loss=tf.keras.losses.sparse_categorical_crossentropy,
                       metrics=['accuracy'])

    # --------------------------------------Evaluation---------------------------------------- #
    K.set_learning_phase(0)
    test_generator, samples_per_epoch = load_carla_test_data(path=test_dataset_path, batch_size=batch_size,
                                                             shape=image_shape)
    print('test samples: ', samples_per_epoch)
    loss = head_model.evaluate(test_generator, steps=math.ceil(samples_per_epoch * 1. / batch_size), verbose=1)
    prediction = head_model.predict(test_generator, steps=math.ceil(samples_per_epoch * 1. / batch_size), verbose=1)

    sum_pre = 0
    # print(prediction)
    for i in range(len(prediction)):
        if prediction[i][1] > prediction[i][0]:
            # count samples predicted as class 1
            sum_pre = sum_pre + 1
    q.put(sum_pre)
    return None


def prenum(model_name, seed_number):
    with open('../scenario_runner-0.9.13/_out/label_test.csv', 'r') as f:
        rows = len(f.readlines()) - 1
    with open('/home/vangogh/software/FuzzScene/code/GA/sample_num_vgg.csv', 'a+', encoding='utf-8') as f:
        csv_writer = csv.writer(f)
        timestr = time.strftime("%Y%m%d-%H%M%S")
        csv_writer.writerow([timestr, model_name, seed_number, rows])
    if rows == 0:
        return 0
    # run predict() in a child process and collect the class-1 count through a queue
    q = multiprocessing.Queue()
    p = multiprocessing.Process(target=predict, args=(model_name, seed_number, q))
    p.start()
    p.join()
    number = q.get()
    return number
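

# Minimal usage sketch (assumptions: the model name 'vgg16' and seed number '1' are
# hypothetical placeholders; the matching weights file under ../sampling_vgg/ and the
# CARLA test data under ../scenario_runner-0.9.13/_out/ must already exist at the
# paths hard-coded above).
if __name__ == '__main__':
    count = prenum('vgg16', '1')
    print('samples predicted as class 1:', count)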