#!/usr/bin/env python3

# Contest Management System - http://cms-dev.github.io/
# Copyright © 2015-2018 Stefano Maggiolo <s.maggiolo@gmail.com>
# Copyright © 2016 Amir Keivan Mohtashami <akmohtashami97@gmail.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.

"""Utility class to run functional-like tests."""
import datetime
import logging
import os
import subprocess

from cms import TOKEN_MODE_FINITE
from cmscommon.datetime import get_system_timezone
from cmstestsuite import CONFIG
from cmstestsuite.Test import TestFailure
from cmstestsuite.Tests import ALL_LANGUAGES
from cmstestsuite.functionaltestframework import FunctionalTestFramework
from cmstestsuite.programstarter import ProgramStarter


logger = logging.getLogger(__name__)


class TestRunner:
    def __init__(self, test_list, contest_id=None, workers=1,
                 cpu_limits=None):
        self.start_time = datetime.datetime.now()
        self.last_end_time = self.start_time

        self.framework = FunctionalTestFramework()
        self.load_cms_conf()
        self.ps = ProgramStarter(cpu_limits)

        # Map from task name to (task id, task_module).
        self.task_id_map = {}

        # String to append to objects' names to avoid collisions: the
        # first positive integer i for which admin_<i> is not already
        # registered. We hope that if the admin name doesn't clash, no
        # other name will.
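        # E.g. with suffix "3", the objects created below are named
        # admin_3, testcontest_3, testrabbit_3_1, <task>_3, and so on.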
        self.suffix = None
        self.num_users = 0
        self.workers = workers

        if CONFIG["TEST_DIR"] is not None:
            # Set up our expected environment.
            os.chdir("%(TEST_DIR)s" % CONFIG)
            os.environ["PYTHONPATH"] = "%(TEST_DIR)s" % CONFIG

        self.start_generic_services()
        self.suffix = self.framework.initialize_aws()

        if contest_id is None:
            self.contest_id = self.create_contest()
        else:
            self.contest_id = int(contest_id)
        self.user_id = self.create_or_get_user()

        self.failures = []
        self.test_list = test_list
        self.n_tests = len(test_list)
        self.n_submissions = sum(len(test.languages) for test in test_list)
        self.n_user_tests = sum(len(test.languages) for test in test_list
                                if test.user_tests)
        logger.info("Have %s submissions and %s user_tests in %s tests...",
                    self.n_submissions, self.n_user_tests, self.n_tests)

    def load_cms_conf(self):
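        # Prefer the checkout's config/cms.conf when running from a git
        # working tree; otherwise fall back to the installed config.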
        try:
            git_root = subprocess.check_output(
                "git rev-parse --show-toplevel", shell=True,
                stderr=subprocess.DEVNULL).decode('utf-8').strip()
        except subprocess.CalledProcessError:
            git_root = None
        CONFIG["TEST_DIR"] = git_root
        CONFIG["CONFIG_PATH"] = "%s/config/cms.conf" % CONFIG["TEST_DIR"]
        if CONFIG["TEST_DIR"] is None:
            CONFIG["CONFIG_PATH"] = "/usr/local/etc/cms.conf"
        return self.framework.get_cms_config()

    def log_elapsed_time(self):
        end_time = datetime.datetime.now()
        logger.info("Time elapsed: %s, since last: %s",
                    end_time - self.start_time,
                    end_time - self.last_end_time)
        self.last_end_time = end_time

    # Service management.

    def start_generic_services(self):
        self.ps.start("LogService")
        self.ps.start("ResourceService")
        self.ps.start("Checker")
        self.ps.start("ScoringService")
        self.ps.start("AdminWebServer")
        # Just to verify it starts successfully.
        self.ps.start("RankingWebServer", shard=None)
        self.ps.wait()

    def shutdown(self):
        self.ps.stop_all()

    # Data creation.

    def create_contest(self):
        """Create a new contest.

        return (int): contest id.

        """
        start_time = datetime.datetime.utcnow()
        stop_time = start_time + datetime.timedelta(days=1)
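        # Finite token mode with 100 initial tokens and no regeneration
        # (token_gen_number=0): plenty for the tests, with no waiting.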
        self.contest_id = self.framework.add_contest(
            name="testcontest_%s" % self.suffix,
            description="A test contest #%s." % self.suffix,
            languages=list(ALL_LANGUAGES),
            allow_password_authentication="checked",
            start=start_time.strftime("%Y-%m-%d %H:%M:%S.%f"),
            stop=stop_time.strftime("%Y-%m-%d %H:%M:%S.%f"),
            timezone=get_system_timezone(),
            allow_user_tests="checked",
            token_mode=TOKEN_MODE_FINITE,
            token_max_number="100",
            token_min_interval="0",
            token_gen_initial="100",
            token_gen_number="0",
            token_gen_interval="1",
            token_gen_max="100",
        )
        logger.info("Created contest %s.", self.contest_id)
        return self.contest_id

    def create_or_get_user(self):
        """Create a new user if it doesn't already exist.

        return (int): user id.

        """
        self.num_users += 1

        def enumerify(x):
            # Return the English ordinal suffix for x ('st', 'nd', ...).
            if 11 <= x % 100 <= 13:
                return 'th'
            return {1: 'st', 2: 'nd', 3: 'rd'}.get(x % 10, 'th')
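        # E.g. enumerify(1) == 'st', enumerify(2) == 'nd',
        # enumerify(11) == 'th', enumerify(21) == 'st'.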

        username = "testrabbit_%s_%d" % (self.suffix, self.num_users)

        # Find a user that may already exist (from a previous contest).
        users = self.framework.get_users(self.contest_id)
        user_create_args = {
            "username": username,
            "password": "kamikaze",
            "method": "plaintext",
            "first_name": "Ms. Test",
            "last_name": "Wabbit the %d%s" % (self.num_users,
                                              enumerify(self.num_users)),
        }
        if username in users:
            self.user_id = users[username]['id']
            self.framework.add_existing_user(self.user_id, **user_create_args)
            logger.info("Using existing user with id %s.", self.user_id)
        else:
            self.user_id = self.framework.add_user(
                contest_id=str(self.contest_id), **user_create_args)
            logger.info("Created user with id %s.", self.user_id)
        return self.user_id

    def create_or_get_task(self, task_module):
        """Create a new task if it does not exist.

        task_module (module): a task as in task/<name>.

        return (int): task id of the new (or existing) task.

        """
        name = "%s_%s" % (task_module.task_info['name'], self.suffix)

        # Have we done this before? Pull it out of our cache if so.
        if name in self.task_id_map:
            # Ensure we don't have multiple modules with the same task name.
            assert self.task_id_map[name][1] == task_module
            return self.task_id_map[name][0]

        task_create_args = {
            "token_mode": TOKEN_MODE_FINITE,
            "token_max_number": "100",
            "token_min_interval": "0",
            "token_gen_initial": "100",
            "token_gen_number": "0",
            "token_gen_interval": "1",
            "token_gen_max": "100",
            "max_submission_number": None,
            "max_user_test_number": None,
            "min_submission_interval": None,
            "min_user_test_interval": None,
        }
        task_create_args.update(task_module.task_info)

        # Update the name with the random bit to avoid conflicts.
        task_create_args["name"] = name

        # Check whether the task already exists (the suffixed name makes
        # sure that if it exists, it is already in our contest).
        tasks = self.framework.get_tasks()
        if name in tasks:
            # Then just use the existing one.
            task = tasks[name]
            task_id = task['id']
            self.task_id_map[name] = (task_id, task_module)
            self.framework.add_existing_task(
                task_id, contest_id=str(self.contest_id), **task_create_args)
            return task_id

        # Otherwise, we need to add the task ourselves.
        task_id = self.framework.add_task(
            contest_id=str(self.contest_id), **task_create_args)

        # Add any managers.
        code_path = os.path.join(
            os.path.dirname(task_module.__file__),
            "code")
        if hasattr(task_module, 'managers'):
            for manager in task_module.managers:
                mpath = os.path.join(code_path, manager)
                self.framework.add_manager(task_id, mpath)

        # Add the task's test data.
        data_path = os.path.join(
            os.path.dirname(task_module.__file__),
            "data")
        for num, (input_file, output_file, public) \
                in enumerate(task_module.test_cases):
            ipath = os.path.join(data_path, input_file)
            opath = os.path.join(data_path, output_file)
            self.framework.add_testcase(task_id, num, ipath, opath, public)

        self.task_id_map[name] = (task_id, task_module)

        logger.info("Created task %s as id %s", name, task_id)
        return task_id
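
    # A task module is expected to provide, next to its ./code/ and ./data/
    # directories, roughly the following (a sketch inferred from the
    # attribute accesses above, not an exhaustive spec):
    #
    #     task_info = {"name": "batch", ...}       # passed to add_task
    #     managers = ["checker"]                   # optional, in ./code/
    #     test_cases = [("1.in", "1.out", True)]   # (input, output, public)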

    # Test execution.

    def _all_submissions(self):
        """Yield all pairs (test, language)."""
        for test in self.test_list:
            for lang in test.languages:
                yield (test, lang)

    def _all_user_tests(self):
        """Yield all pairs (test, language) for tests with user tests."""
        for test in self.test_list:
            if test.user_tests:
                for lang in test.languages:
                    yield (test, lang)

    def submit_tests(self, concurrent_submit_and_eval=True):
        """Create the tasks, and submit for all languages in all tests.

        concurrent_submit_and_eval (boolean): if False, start ES only
            after CWS has received all the submissions, with the goal
            of having a clearer view of the time each step takes.

        """
        # Pre-install all tasks in the contest. We start the other services
        # after this to ensure they pick up the new tasks before receiving
        # data for them.
        for test in self.test_list:
            self.create_or_get_task(test.task_module)

        # For now we start only the services we need in order to submit,
        # and we start the others while the submissions are being sent
        # out. A submission can arrive after ES's first sweep, but
        # before CWS connects to ES; if so, it will be ignored until
        # ES's second sweep, making the test flaky due to timeouts. By
        # waiting for ES to start before submitting, we ensure CWS can
        # send the notification for all submissions.
        self.ps.start("ContestWebServer", contest=self.contest_id)
        if concurrent_submit_and_eval:
            self.ps.start("EvaluationService", contest=self.contest_id)
        self.ps.wait()

        self.ps.start("ProxyService", contest=self.contest_id)
        for shard in range(self.workers):
            self.ps.start("Worker", shard)

        for i, (test, lang) in enumerate(self._all_submissions()):
            logger.info("Submitting submission %s/%s: %s (%s)",
                        i + 1, self.n_submissions, test.name, lang)
            task_id = self.create_or_get_task(test.task_module)
            try:
                test.submit(task_id, self.user_id, lang)
            except TestFailure as f:
                logger.error("(FAILED (while submitting): %s)", f)
                self.failures.append((test, lang, str(f)))

        for i, (test, lang) in enumerate(self._all_user_tests()):
            logger.info("Submitting user test %s/%s: %s (%s)",
                        i + 1, self.n_user_tests, test.name, lang)
            task_id = self.create_or_get_task(test.task_module)
            try:
                test.submit_user_test(task_id, self.user_id, lang)
            except TestFailure as f:
                logger.error("(FAILED (while submitting): %s)", f)
                self.failures.append((test, lang, str(f)))

        if not concurrent_submit_and_eval:
            self.ps.start("EvaluationService", contest=self.contest_id)
            self.ps.wait()

    def wait_for_evaluation(self):
        """Wait for all submissions to be evaluated.

        The first will wait longer, as ES prioritizes compilations.

        """
        for i, (test, lang) in enumerate(self._all_submissions()):
            logger.info("Waiting for submission %s/%s: %s (%s)",
                        i + 1, self.n_submissions, test.name, lang)
            try:
                test.wait(self.contest_id, lang)
            except TestFailure as f:
                logger.error("(FAILED (while evaluating): %s)", f)
                self.failures.append((test, lang, str(f)))

        for i, (test, lang) in enumerate(self._all_user_tests()):
            logger.info("Waiting for user test %s/%s: %s (%s)",
                        i + 1, self.n_user_tests, test.name, lang)
            try:
                test.wait_user_test(self.contest_id, lang)
            except TestFailure as f:
                logger.error("(FAILED (while evaluating user test): %s)", f)
                self.failures.append((test, lang, str(f)))

        return self.failures
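

# A minimal usage sketch. ALL_TESTS is a placeholder for whatever test
# list the caller assembles (e.g. the one defined in cmstestsuite.Tests);
# everything else is this class's own API:
#
#     runner = TestRunner(ALL_TESTS, workers=4)
#     runner.submit_tests()
#     failures = runner.wait_for_evaluation()
#     runner.shutdown()
#     for test, lang, msg in failures:
#         print("FAILED %s (%s): %s" % (test.name, lang, msg))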