def submit_experiment_to_azureml(test, test_folder, test_markers, junitxml,
                                 run_config, experiment):
    """
    Submitting the experiment to AzureML actually runs the script.

    Args:
        test (str): pytest driver script to run, such as
            ./tests/ci/run_pytest.py
        test_folder (str): folder containing the tests to run, such as
            ./tests/unit
        test_markers (str): pytest markers, such as
            "not notebooks and not spark and not gpu"
        junitxml (str): output file for the test summary; note that
            "--junitxml" must be included as part of the string,
            for example "--junitxml reports/test-unit.xml"
        run_config: environment configuration
        experiment: instance of an Experiment, a collection of
            trials where each trial is a run

    Returns:
        run: the submitted AzureML run (trial)
    """
    logger.debug('submit: testfolder {}'.format(test_folder))
    logger.debug('junitxml: {}'.format(junitxml))
    project_folder = "."

    script_run_config = ScriptRunConfig(source_directory=project_folder,
                                        script=test,
                                        run_config=run_config,
                                        arguments=["--testfolder",
                                                   test_folder,
                                                   "--testmarkers",
                                                   test_markers,
                                                   "--xmlname",
                                                   junitxml])
    run = experiment.submit(script_run_config)
    # waits only for configuration to complete
    run.wait_for_completion(show_output=True, wait_post_processing=True)
    # Test logs can also be found on Azure: in the Azure portal, open the
    # workspace, locate the experiment by name, then open the individual run.
    logger.debug('files {}'.format(run.get_file_names()))
    return run
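
For context, here is a minimal sketch of how this function might be called. The workspace configuration, compute target name, and experiment name below are illustrative assumptions, not values taken from the original script; the test paths and marker strings are the examples from the docstring above. The sketch also assumes that ScriptRunConfig and a module-level logger are already imported/defined earlier in the file.

from azureml.core import Workspace, Experiment
from azureml.core.runconfig import RunConfiguration

# Assumes a config.json describing the workspace is available locally.
ws = Workspace.from_config()

# Hypothetical environment configuration; the real script builds this elsewhere.
run_config = RunConfiguration()
run_config.target = "persistent-cluster"  # hypothetical compute target name

experiment = Experiment(workspace=ws, name="unit_tests")  # illustrative name

run = submit_experiment_to_azureml(
    test="./tests/ci/run_pytest.py",
    test_folder="./tests/unit",
    test_markers="not notebooks and not spark and not gpu",
    junitxml="--junitxml reports/test-unit.xml",
    run_config=run_config,
    experiment=experiment,
)
print(run.get_portal_url())  # link to the run in the Azure portal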