PageRenderTime 2859ms CodeModel.GetById 29ms RepoModel.GetById 1ms app.codeStats 0ms

/hack/make-rules/test.sh

https://gitlab.com/unofficial-mirrors/kubernetes
Shell | 392 lines | 266 code | 51 blank | 75 comment | 33 complexity | 00c7ca623c7dcc2a4f941d708349cd29 MD5 | raw file
  1. #!/usr/bin/env bash
  2. # Copyright 2014 The Kubernetes Authors.
  3. #
  4. # Licensed under the Apache License, Version 2.0 (the "License");
  5. # you may not use this file except in compliance with the License.
  6. # You may obtain a copy of the License at
  7. #
  8. # http://www.apache.org/licenses/LICENSE-2.0
  9. #
  10. # Unless required by applicable law or agreed to in writing, software
  11. # distributed under the License is distributed on an "AS IS" BASIS,
  12. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13. # See the License for the specific language governing permissions and
  14. # limitations under the License.
  15. set -o errexit
  16. set -o nounset
  17. set -o pipefail
  18. KUBE_ROOT=$(dirname "${BASH_SOURCE}")/../..
  19. source "${KUBE_ROOT}/hack/lib/init.sh"
  20. kube::golang::setup_env
  21. # start the cache mutation detector by default so that cache mutators will be found
  22. KUBE_CACHE_MUTATION_DETECTOR="${KUBE_CACHE_MUTATION_DETECTOR:-true}"
  23. export KUBE_CACHE_MUTATION_DETECTOR
  24. # panic the server on watch decode errors since they are considered coder mistakes
  25. KUBE_PANIC_WATCH_DECODE_ERROR="${KUBE_PANIC_WATCH_DECODE_ERROR:-true}"
  26. export KUBE_PANIC_WATCH_DECODE_ERROR
  27. # Handle case where OS has sha#sum commands, instead of shasum.
  28. if which shasum >/dev/null 2>&1; then
  29. SHA1SUM="shasum -a1"
  30. elif which sha1sum >/dev/null 2>&1; then
  31. SHA1SUM="sha1sum"
  32. else
  33. echo "Failed to find shasum or sha1sum utility." >&2
  34. exit 1
  35. fi
# Print (to stdout) the sorted, de-duplicated list of Go packages that contain
# unit tests, excluding e2e/integration suites, build artifacts, and most
# vendored code, then append the staged repos that should also be tested.
# Runs in a subshell so the `cd` does not leak into the caller.
kube::test::find_dirs() {
  (
    cd ${KUBE_ROOT}
    # Main repo: every directory holding a *_test.go file, minus the pruned
    # paths (generated output, e2e/integration suites, staging, vendor, ...).
    # Paths are rewritten from ./... to the full Go package path and sorted
    # bytewise (LC_ALL=C) for reproducible output.
    find -L . -not \( \
        \( \
          -path './_artifacts/*' \
          -o -path './bazel-*/*' \
          -o -path './_output/*' \
          -o -path './_gopath/*' \
          -o -path './cmd/kubeadm/test/*' \
          -o -path './contrib/podex/*' \
          -o -path './output/*' \
          -o -path './release/*' \
          -o -path './target/*' \
          -o -path './test/e2e/*' \
          -o -path './test/e2e_node/*' \
          -o -path './test/e2e_kubeadm/*' \
          -o -path './test/integration/*' \
          -o -path './third_party/*' \
          -o -path './staging/*' \
          -o -path './vendor/*' \
        \) -prune \
      \) -name '*_test.go' -print0 | xargs -0n1 dirname | sed "s|^\./|${KUBE_GO_PACKAGE}/|" | LC_ALL=C sort -u
    # NOTE(review): in this second find, each '-o -path' alternative is ORed
    # together, so '-name/-print0' binds only to the final '-path' term —
    # presumably intended to add back selected vendored/e2e_node test dirs;
    # confirm against upstream before relying on it.
    find -L . \
      -path './_output' -prune \
      -o -path './vendor/k8s.io/client-go/*' \
      -o -path './vendor/k8s.io/apiserver/*' \
      -o -path './test/e2e_node/system/*' \
      -name '*_test.go' -print0 | xargs -0n1 dirname | sed "s|^\./|${KUBE_GO_PACKAGE}/|" | LC_ALL=C sort -u
    # run tests for client-go
    find ./staging/src/k8s.io/client-go -name '*_test.go' \
      -name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./staging/src/|./vendor/|' | LC_ALL=C sort -u
    # run tests for apiserver
    find ./staging/src/k8s.io/apiserver -name '*_test.go' \
      -name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./staging/src/|./vendor/|' | LC_ALL=C sort -u
    # run tests for apimachinery
    find ./staging/src/k8s.io/apimachinery -name '*_test.go' \
      -name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./staging/src/|./vendor/|' | LC_ALL=C sort -u
    # run tests for kube-aggregator
    find ./staging/src/k8s.io/kube-aggregator -name '*_test.go' \
      -name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./staging/src/|./vendor/|' | LC_ALL=C sort -u
    # run tests for apiextensions-apiserver, skipping its integration suite
    find ./staging/src/k8s.io/apiextensions-apiserver -not \( \
        \( \
          -path '*/test/integration/*' \
        \) -prune \
      \) -name '*_test.go' \
      -name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./staging/src/|./vendor/|' | LC_ALL=C sort -u
    # run tests for sample-apiserver
    find ./staging/src/k8s.io/sample-apiserver -name '*_test.go' \
      -name '*_test.go' -print0 | xargs -0n1 dirname | sed 's|^\./staging/src/|./vendor/|' | LC_ALL=C sort -u
  )
}
  86. KUBE_TIMEOUT=${KUBE_TIMEOUT:--timeout 120s}
  87. KUBE_COVER=${KUBE_COVER:-n} # set to 'y' to enable coverage collection
  88. KUBE_COVERMODE=${KUBE_COVERMODE:-atomic}
  89. # How many 'go test' instances to run simultaneously when running tests in
  90. # coverage mode.
  91. KUBE_COVERPROCS=${KUBE_COVERPROCS:-4}
  92. KUBE_RACE=${KUBE_RACE:-} # use KUBE_RACE="-race" to enable race testing
  93. # Set to the goveralls binary path to report coverage results to Coveralls.io.
  94. KUBE_GOVERALLS_BIN=${KUBE_GOVERALLS_BIN:-}
  95. # Lists of API Versions of each groups that should be tested, groups are
  96. # separated by comma, lists are separated by semicolon. e.g.,
  97. # "v1,compute/v1alpha1,experimental/v1alpha2;v1,compute/v2,experimental/v1alpha3"
  98. # FIXME: due to current implementation of a test client (see: pkg/api/testapi/testapi.go)
  99. # ONLY the last version is tested in each group.
  100. ALL_VERSIONS_CSV=$(IFS=',';echo "${KUBE_AVAILABLE_GROUP_VERSIONS[*]// /,}";IFS=$)
  101. KUBE_TEST_API_VERSIONS="${KUBE_TEST_API_VERSIONS:-${ALL_VERSIONS_CSV}}"
  102. # once we have multiple group supports
  103. # Create a junit-style XML test report in this directory if set.
  104. KUBE_JUNIT_REPORT_DIR=${KUBE_JUNIT_REPORT_DIR:-}
  105. # Set to 'y' to keep the verbose stdout from tests when KUBE_JUNIT_REPORT_DIR is
  106. # set.
  107. KUBE_KEEP_VERBOSE_TEST_OUTPUT=${KUBE_KEEP_VERBOSE_TEST_OUTPUT:-n}
  108. kube::test::usage() {
  109. kube::log::usage_from_stdin <<EOF
  110. usage: $0 [OPTIONS] [TARGETS]
  111. OPTIONS:
  112. -p <number> : number of parallel workers, must be >= 1
  113. EOF
  114. }
  115. isnum() {
  116. [[ "$1" =~ ^[0-9]+$ ]]
  117. }
# Number of parallel 'go test' workers; set via -p, defaults to 1.
PARALLEL="${PARALLEL:-1}"
while getopts "hp:i:" opt ; do
  case $opt in
    h)
      kube::test::usage
      exit 0
      ;;
    p)
      # Worker count must be a positive integer.
      PARALLEL="$OPTARG"
      if ! isnum "${PARALLEL}" || [[ "${PARALLEL}" -le 0 ]]; then
        kube::log::usage "'$0': argument to -p must be numeric and greater than 0"
        kube::test::usage
        exit 1
      fi
      ;;
    i)
      # -i (iterations) was removed; point users at the GOFLAGS equivalent.
      kube::log::usage "'$0': use GOFLAGS='-count <num-iterations>'"
      kube::test::usage
      exit 1
      ;;
    ?)
      kube::test::usage
      exit 1
      ;;
    :)
      kube::log::usage "Option -$OPTARG <value>"
      kube::test::usage
      exit 1
      ;;
  esac
done
# Drop the parsed options; remaining positional args are test targets.
shift $((OPTIND - 1))
# Use eval to preserve embedded quoted strings.
# (Deliberate: GOFLAGS/KUBE_TEST_ARGS may contain quoted, space-bearing
# values that plain word-splitting would mangle.)
eval "goflags=(${GOFLAGS:-})"
eval "testargs=(${KUBE_TEST_ARGS:-})"

# Used to filter verbose test output.
go_test_grep_pattern=".*"

# The go-junit-report tool needs full test case information to produce a
# meaningful report.
if [[ -n "${KUBE_JUNIT_REPORT_DIR}" ]] ; then
  goflags+=(-v)
  # Show only summary lines by matching lines like "status package/test"
  go_test_grep_pattern="^[^[:space:]]\+[[:space:]]\+[^[:space:]]\+/[^[[:space:]]\+"
fi

# Filter out arguments that start with "-" and move them to goflags.
testcases=()
for arg; do
  if [[ "${arg}" == -* ]]; then
    goflags+=("${arg}")
  else
    testcases+=("${arg}")
  fi
done
# No explicit targets given: test every package kube::test::find_dirs reports.
if [[ ${#testcases[@]} -eq 0 ]]; then
  testcases=($(kube::test::find_dirs))
fi
set -- "${testcases[@]+${testcases[@]}}"
  175. junitFilenamePrefix() {
  176. if [[ -z "${KUBE_JUNIT_REPORT_DIR}" ]]; then
  177. echo ""
  178. return
  179. fi
  180. mkdir -p "${KUBE_JUNIT_REPORT_DIR}"
  181. # This filename isn't parsed by anything, and we must avoid
  182. # exceeding 255 character filename limit. KUBE_TEST_API
  183. # barely fits there and in coverage mode test names are
  184. # appended to generated file names, easily exceeding
  185. # 255 chars in length. So let's just use a sha1 hash of it.
  186. local KUBE_TEST_API_HASH="$(echo -n "${KUBE_TEST_API//\//-}"| ${SHA1SUM} |awk '{print $1}')"
  187. echo "${KUBE_JUNIT_REPORT_DIR}/junit_${KUBE_TEST_API_HASH}_$(kube::util::sortable_date)"
  188. }
  189. verifyAndSuggestPackagePath() {
  190. local specified_package_path="$1"
  191. local alternative_package_path="$2"
  192. local original_package_path="$3"
  193. local suggestion_package_path="$4"
  194. if ! [ -d "$specified_package_path" ]; then
  195. # Because k8s sets a localized $GOPATH for testing, seeing the actual
  196. # directory can be confusing. Instead, just show $GOPATH if it exists in the
  197. # $specified_package_path.
  198. local printable_package_path=$(echo "$specified_package_path" | sed "s|$GOPATH|\$GOPATH|")
  199. kube::log::error "specified test path '$printable_package_path' does not exist"
  200. if [ -d "$alternative_package_path" ]; then
  201. kube::log::info "try changing \"$original_package_path\" to \"$suggestion_package_path\""
  202. fi
  203. exit 1
  204. fi
  205. }
  206. verifyPathsToPackagesUnderTest() {
  207. local packages_under_test=($@)
  208. for package_path in "${packages_under_test[@]}"; do
  209. local local_package_path="$package_path"
  210. local go_package_path="$GOPATH/src/$package_path"
  211. if [[ "${package_path:0:2}" == "./" ]] ; then
  212. verifyAndSuggestPackagePath "$local_package_path" "$go_package_path" "$package_path" "${package_path:2}"
  213. else
  214. verifyAndSuggestPackagePath "$go_package_path" "$local_package_path" "$package_path" "./$package_path"
  215. fi
  216. done
  217. }
# Combine the per-run "<prefix>*.stdout" captures into a single JUnit XML
# report using go-junit-report, then delete the captures unless the user
# asked to keep them. No-op when the prefix is empty (reporting disabled).
produceJUnitXMLReport() {
  local -r junit_filename_prefix=$1
  if [[ -z "${junit_filename_prefix}" ]]; then
    return
  fi

  local test_stdout_filenames
  local junit_xml_filename
  # Intentionally unquoted uses below: the variable holds a whitespace-
  # separated list of filenames expanded from this glob.
  test_stdout_filenames=$(ls ${junit_filename_prefix}*.stdout)
  junit_xml_filename="${junit_filename_prefix}.xml"
  if ! command -v go-junit-report >/dev/null 2>&1; then
    kube::log::error "go-junit-report not found; please install with " \
      "go get -u github.com/jstemmer/go-junit-report"
    return
  fi
  cat ${test_stdout_filenames} | go-junit-report > "${junit_xml_filename}"
  # Clean up the raw stdout captures unless KUBE_KEEP_VERBOSE_TEST_OUTPUT=y.
  if [[ ! ${KUBE_KEEP_VERBOSE_TEST_OUTPUT} =~ ^[yY]$ ]]; then
    rm ${test_stdout_filenames}
  fi
  kube::log::status "Saved JUnit XML test report to ${junit_xml_filename}"
}
# Run 'go test' over the packages given as arguments, either as one fast
# batch (no coverage) or one package at a time in parallel (coverage mode),
# producing JUnit XML and combined coverage artifacts as configured.
# Reads: goflags, testargs, go_test_grep_pattern, KUBE_COVER, KUBE_RACE,
#        KUBE_TIMEOUT, KUBE_COVERMODE, KUBE_COVERPROCS, KUBE_TEST_API, SHA1SUM
# Writes: COMBINED_COVER_PROFILE (consumed later by reportCoverageToCoveralls)
# Returns: the 'go test' exit status.
runTests() {
  local junit_filename_prefix
  junit_filename_prefix=$(junitFilenamePrefix)

  verifyPathsToPackagesUnderTest "$@"

  # If we're not collecting coverage, run all requested tests with one 'go test'
  # command, which is much faster.
  if [[ ! ${KUBE_COVER} =~ ^[yY]$ ]]; then
    kube::log::status "Running tests without code coverage"
    # '&& rc=$? || rc=$?' captures the pipeline's status in rc without letting
    # errexit kill the script on test failure.
    go test "${goflags[@]:+${goflags[@]}}" \
      ${KUBE_RACE} ${KUBE_TIMEOUT} "${@}" \
      "${testargs[@]:+${testargs[@]}}" \
      | tee ${junit_filename_prefix:+"${junit_filename_prefix}.stdout"} \
      | grep --binary-files=text "${go_test_grep_pattern}" && rc=$? || rc=$?
    produceJUnitXMLReport "${junit_filename_prefix}"
    return ${rc}
  fi

  # Create coverage report directories.
  KUBE_TEST_API_HASH="$(echo -n "${KUBE_TEST_API//\//-}"| ${SHA1SUM} |awk '{print $1}')"
  cover_report_dir="/tmp/k8s_coverage/${KUBE_TEST_API_HASH}/$(kube::util::sortable_date)"
  cover_profile="coverage.out" # Name for each individual coverage profile
  kube::log::status "Saving coverage output in '${cover_report_dir}'"
  # One subdirectory per package: each package path is prefixed with the
  # report dir via the ${@/#/...} substitution.
  mkdir -p "${@+${@/#/${cover_report_dir}/}}"

  # Run all specified tests, collecting coverage results. Go currently doesn't
  # support collecting coverage across multiple packages at once, so we must issue
  # separate 'go test' commands for each package and then combine at the end.
  # To speed things up considerably, we can at least use xargs -P to run multiple
  # 'go test' commands at once.
  # To properly parse the test results if generating a JUnit test report, we
  # must make sure the output from PARALLEL runs is not mixed. To achieve this,
  # we spawn a subshell for each PARALLEL process, redirecting the output to
  # separate files.

  # ignore paths:
  # vendor/k8s.io/code-generator/cmd/generator: is fragile when run under coverage, so ignore it for now.
  #   https://github.com/kubernetes/kubernetes/issues/24967
  # vendor/k8s.io/client-go/1.4/rest: causes cover internal errors
  #   https://github.com/golang/go/issues/16540
  cover_ignore_dirs="vendor/k8s.io/code-generator/cmd/generator|vendor/k8s.io/client-go/1.4/rest"
  for path in $(echo $cover_ignore_dirs | sed 's/|/ /g'); do
    echo -e "skipped\tk8s.io/kubernetes/$path"
  done

  # Escaped \$0/\$_pkg expand inside each spawned bash -c subshell; the
  # unescaped expansions (goflags, dirs, patterns) are baked in here.
  printf "%s\n" "${@}" \
    | grep -Ev $cover_ignore_dirs \
    | xargs -I{} -n 1 -P ${KUBE_COVERPROCS} \
    bash -c "set -o pipefail; _pkg=\"\$0\"; _pkg_out=\${_pkg//\//_}; \
      go test ${goflags[@]:+${goflags[@]}} \
        ${KUBE_RACE} \
        ${KUBE_TIMEOUT} \
        -cover -covermode=\"${KUBE_COVERMODE}\" \
        -coverprofile=\"${cover_report_dir}/\${_pkg}/${cover_profile}\" \
        \"\${_pkg}\" \
        ${testargs[@]:+${testargs[@]}} \
      | tee ${junit_filename_prefix:+\"${junit_filename_prefix}-\$_pkg_out.stdout\"} \
      | grep \"${go_test_grep_pattern}\"" \
    {} \
    && test_result=$? || test_result=$?

  produceJUnitXMLReport "${junit_filename_prefix}"

  COMBINED_COVER_PROFILE="${cover_report_dir}/combined-coverage.out"
  {
    # The combined coverage profile needs to start with a line indicating which
    # coverage mode was used (set, count, or atomic). This line is included in
    # each of the coverage profiles generated when running 'go test -cover', but
    # we strip these lines out when combining so that there's only one.
    echo "mode: ${KUBE_COVERMODE}"

    # Include all coverage reach data in the combined profile, but exclude the
    # 'mode' lines, as there should be only one.
    for x in `find "${cover_report_dir}" -name "${cover_profile}"`; do
      cat $x | grep -h -v "^mode:" || true
    done
  } >"${COMBINED_COVER_PROFILE}"

  coverage_html_file="${cover_report_dir}/combined-coverage.html"
  go tool cover -html="${COMBINED_COVER_PROFILE}" -o="${coverage_html_file}"
  kube::log::status "Combined coverage report: ${coverage_html_file}"

  return ${test_result}
}
  312. reportCoverageToCoveralls() {
  313. if [[ ${KUBE_COVER} =~ ^[yY]$ ]] && [[ -x "${KUBE_GOVERALLS_BIN}" ]]; then
  314. kube::log::status "Reporting coverage results to Coveralls for service ${CI_NAME:-}"
  315. ${KUBE_GOVERALLS_BIN} -coverprofile="${COMBINED_COVER_PROFILE}" \
  316. ${CI_NAME:+"-service=${CI_NAME}"} \
  317. ${COVERALLS_REPO_TOKEN:+"-repotoken=${COVERALLS_REPO_TOKEN}"} \
  318. || true
  319. fi
  320. }
  321. checkFDs() {
  322. # several unittests panic when httptest cannot open more sockets
  323. # due to the low default files limit on OS X. Warn about low limit.
  324. local fileslimit="$(ulimit -n)"
  325. if [[ $fileslimit -lt 1000 ]]; then
  326. echo "WARNING: ulimit -n (files) should be at least 1000, is $fileslimit, may cause test failure";
  327. fi
  328. }
# Warn early if the fd limit is too low for the socket-heavy unit tests.
checkFDs

# Convert the CSVs to arrays.
# KUBE_TEST_API_VERSIONS is semicolon-separated; each element is one
# comma-separated group/version list to run the full test suite against.
IFS=';' read -a apiVersions <<< "${KUBE_TEST_API_VERSIONS}"
apiVersionsCount=${#apiVersions[@]}
for (( i=0; i<${apiVersionsCount}; i++ )); do
  apiVersion=${apiVersions[i]}
  echo "Running tests for APIVersion: $apiVersion"
  # KUBE_TEST_API sets the version of each group to be tested.
  KUBE_TEST_API="${apiVersion}" runTests "$@"
done

# We might run the tests for multiple versions, but we want to report only
# one of them to coveralls. Here we report coverage from the last run.
reportCoverageToCoveralls