|
| 1 | +#!/bin/bash |
| 2 | +# Copyright The Lightning AI team. |
| 3 | +# |
| 4 | +# Licensed under the Apache License, Version 2.0 (the "License"); |
| 5 | +# you may not use this file except in compliance with the License. |
| 6 | +# You may obtain a copy of the License at |
| 7 | +# |
| 8 | +# http://www.apache.org/licenses/LICENSE-2.0 |
| 9 | +# |
| 10 | +# Unless required by applicable law or agreed to in writing, software |
| 11 | +# distributed under the License is distributed on an "AS IS" BASIS, |
| 12 | +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 13 | +# See the License for the specific language governing permissions and |
| 14 | +# limitations under the License. |
| 15 | + |
# THIS FILE ASSUMES IT IS RUN INSIDE THE tests DIRECTORY.

# Batch size for testing: determines how many standalone test invocations run in parallel.
# It can be set through the env variable NUM_PARALLEL_TESTS and defaults to 5 if not set.
test_batch_size="${NUM_PARALLEL_TESTS:-5}"

# Source directory for coverage runs can be set with COVERAGE_SOURCE and defaults to "lightning".
codecov_source="${COVERAGE_SOURCE:-"lightning"}"

# The test directory is passed as the first argument to the script
test_dir=$1 # parse the first argument

# There is also a timeout for each test invocation.
# It can be set through the env variable TEST_TIMEOUT and defaults to 1200 seconds if not set.
test_timeout="${TEST_TIMEOUT:-1200}"

# Temporary file to store the collected tests
COLLECTED_TESTS_FILE="collected_tests.txt"

ls -lh . # show the contents of the directory

# Python arguments for running the tests under coverage.
# NOTE: used unquoted later ("python ${defaults} ...") so it word-splits into separate args.
defaults=" -m coverage run --source ${codecov_source} --append -m pytest --no-header -v -s --color=yes --timeout=${test_timeout} --durations=0 "
echo "Using defaults: ${defaults}"
| 40 | + |
# Get the list of parametrizations. We need to call them separately; the last two lines are removed.
# note: if there's a syntax error, this will fail with some garbled output.
# Capture stdout AND stderr in the file (redirection order matters: the old
# `2>&1 > file` form sent stderr to the terminal, so collection errors were
# missing from the file dumped below).
python -um pytest ${test_dir} -q --collect-only --pythonwarnings ignore > "$COLLECTED_TESTS_FILE" 2>&1
# Early terminate if collection failed (e.g. syntax error)
if [[ $? != 0 ]]; then
  cat "$COLLECTED_TESTS_FILE"
  printf "ERROR: test collection failed!\n"
  exit 1
fi
| 50 | + |
# Initialize empty array of collected test invocations
tests=()

# Read the collection output line by line
while IFS= read -r line; do
  # Only keep lines containing "test_" (actual test ids, not pytest chatter)
  if [[ $line == *"test_"* ]]; then
    # Strip everything up to and including "<test_dir>/" so the id is relative,
    # then re-prefix with the test dir to get a runnable path.
    pruned_line="${line#*${test_dir}/}"
    tests+=("${test_dir}/$pruned_line")
  fi
done < "$COLLECTED_TESTS_FILE"

# Count tests
test_count=${#tests[@]}

# Display results
printf "collected $test_count tests:\n-------------------\n"
printf "%s\n" "${tests[@]}"
printf "\n===================\n"

# Warn on a single test; abort when nothing was collected.
# (Both branches use [[ ]] — the original mixed [[ ]] and [ ].)
if [[ $test_count -eq 1 ]]; then
  printf "WARNING: only one test found!\n"
elif [[ $test_count -eq 0 ]]; then
  printf "ERROR: no tests found!\n"
  exit 1
fi
| 79 | + |
# clear all the collected reports
rm -f parallel_test_output-*.txt # in case it exists, remove it

status=0        # aggregated script status
report=""       # final report
pids=()         # array of PIDs for running tests
test_ids=()     # array of indexes of running tests
failed_tests=() # array of failed tests
printf "Running $test_count tests in batches of $test_batch_size:\n"
for i in "${!tests[@]}"; do
  test=${tests[$i]}
  printf "* Running test $((i+1))/$test_count: $test\n"

  # Execute the test in the background.
  # Redirect to a log file that buffers the test output: since the tests run in
  # the background, we cannot let them write to std{out,err} or the outputs
  # would be garbled together.
  # NOTE: ${defaults} is intentionally unquoted so it word-splits into arguments.
  python ${defaults} "$test" &> "parallel_test_output-$i.txt" &
  test_ids+=($i) # save the test's id in an array with running tests
  pids+=($!)     # save the PID in an array with running tests

  # if we reached the batch size, wait for all tests in the batch to finish
  if (( (($i + 1) % $test_batch_size == 0) || $i == $test_count-1 )); then
    printf "-> Waiting for batch to finish: $(IFS=' '; echo "${pids[@]}")\n"
    # wait for running tests
    for j in "${!test_ids[@]}"; do
      i=${test_ids[$j]} # restore the global test's id
      pid=${pids[$j]}   # restore the particular PID
      test=${tests[$i]} # restore the test name
      # (was "$tests", which expands to only the first array element)
      printf "? Waiting for $test >> parallel_test_output-$i.txt (PID: $pid)\n"
      # wait for this specific PID and propagate its exit status;
      # `wait -n <pid>` only accepts pid arguments on bash >= 5.1
      wait "$pid"
      # get the exit status of the test
      test_status=$?
      # add row to the final report
      report+="Ran\t$test\t>> exit:$test_status\n"
      if [[ $test_status != 0 ]]; then
        # remember the failed test so its log can be dumped at the end
        failed_tests+=($i)
        # the aggregated status becomes the last non-zero exit code
        status=$test_status
      fi
    done
    printf "Starting over with a new batch...\n"
    test_ids=() # reset the test id array
    pids=()     # reset the PID array
  fi
done
| 126 | + |
# print test report with exit code for each test
printf '=%.s' {1..80}
# '%b' expands the \t and \n escapes accumulated in $report without treating
# test names as a printf format string (a '%' in a test id would break output)
printf '%b' "\n$report"
printf '=%.s' {1..80}
printf '\n'

# print failed tests from dumped logs
if [[ ${#failed_tests[@]} -gt 0 ]]; then
  printf "Failed tests:\n"
  for i in "${failed_tests[@]}"; do
    printf '\n%.s' {1..5}
    printf '=%.s' {1..80}
    printf '\n%s\n' "${tests[$i]}"
    # '--' is required: a format starting with '-' is otherwise parsed as an
    # option by the printf builtin ("-%: invalid option")
    printf -- '-%.s' {1..80}
    printf "\n"
    # show the captured output of the failed test
    cat "parallel_test_output-$i.txt"
    printf "\n"
    printf '=%.s' {1..80}
  done
else
  printf "All tests passed!\n"
fi

# exit with the worst test result
exit $status
0 commit comments