#!/bin/bash
set -ex
# Travis legacy boxes give you 1.5 CPUs, container-based boxes give you 2 CPUs
export NPY_NUM_BUILD_JOBS=2
# setup env
if [ -r /usr/lib/libeatmydata/libeatmydata.so ]; then
# much faster package installation
export LD_PRELOAD='/usr/lib/libeatmydata/libeatmydata.so'
elif [ -r /usr/lib/*/libeatmydata.so ]; then
# much faster package installation
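# note: '$LIB' is intentionally left unexpanded (single quotes); it is a dynamic string
# token that the glibc loader substitutes at run time (e.g. with 'lib' or 'lib64')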
export LD_PRELOAD='/usr/$LIB/libeatmydata.so'
fi
source builds/venv/bin/activate
# travis venv tests override python
PYTHON=${PYTHON:-python}
PIP=${PIP:-pip}
if [ -n "$PYTHON_OPTS" ]; then
PYTHON="${PYTHON} $PYTHON_OPTS"
fi
# make some warnings fatal, mostly to match windows compilers
werrors="-Werror=vla -Werror=nonnull -Werror=pointer-arith"
werrors="$werrors -Werror=implicit-function-declaration"
# build with c99 by default
setup_base()
{
# use default python flags but remove sign-compare
sysflags="$($PYTHON -c "from distutils import sysconfig; \
print (sysconfig.get_config_var('CFLAGS'))")"
export CFLAGS="$sysflags $werrors -Wlogical-op -Wno-sign-compare"
build_args=()
# Strictly disable all kinds of optimizations
if [ -n "$WITHOUT_OPTIMIZATIONS" ]; then
build_args+=("--disable-optimization")
# only disable SIMD optimizations
elif [ -n "$WITHOUT_SIMD" ]; then
build_args+=("--cpu-baseline=none" "--cpu-dispatch=none")
elif [ -n "$CPU_DISPATCH" ]; then
build_args+=("--cpu-dispatch=$CPU_DISPATCH")
else
# SIMD extensions that need to be tested at both compile time and runtime via test_simd.py;
# any specified features will be ignored if they're not supported by the compiler or platform.
# note: this is almost the same as the default value of --simd-test, except that the policy
# `$werror` is added to treat all warnings as errors
build_args+=("--simd-test=\$werror BASELINE SSE2 SSE42 XOP FMA4 (FMA3 AVX2) AVX512F AVX512_SKX VSX VSX2 VSX3 NEON ASIMD VX VXE VXE2")
fi
if [ -z "$USE_DEBUG" ]; then
# '-Werror=undef' is only activated when DEBUG isn't enabled, since the _cffi_backend
# extension otherwise breaks the build due to the following error:
#
# error: "HAVE_FFI_PREP_CIF_VAR" is not defined, evaluates to 0 [-Werror=undef]
# #if !HAVE_FFI_PREP_CIF_VAR && defined(__arm64__) && defined(__APPLE__)
#
export CFLAGS="$CFLAGS -Werror=undef"
$PYTHON setup.py build "${build_args[@]}" install 2>&1 | tee log
else
# The job run with USE_DEBUG=1 on travis needs this.
export CFLAGS=$CFLAGS" -Wno-maybe-uninitialized"
$PYTHON setup.py build "${build_args[@]}" build_src --verbose-cfg build_ext --inplace 2>&1 | tee log
fi
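# scan the build log for compiler warnings, filtering out known-noisy lines;
# when a real LAPACK is used, any remaining warning fails the build below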
grep -v "_configtest" log \
| grep -vE "ld returned 1|no files found matching" \
| grep -vE "no previously-included files matching" \
| grep -vE "manifest_maker: standard file '-c'" \
| grep -E "warning\>" \
| tee warnings
if [ "$LAPACK" != "None" ]; then
[[ $(wc -l < warnings) -lt 1 ]]
fi
}
run_test()
{
# Install the test dependencies.
# Clear PYTHONOPTIMIZE when running `pip install -r test_requirements.txt`
# because version 2.19 of pycparser (a dependency of one of the packages
# in test_requirements.txt) does not provide a wheel, and the source tar
# file does not install correctly when Python's optimization level is set
# to strip docstrings (see https://github.com/eliben/pycparser/issues/291).
PYTHONOPTIMIZE="" $PIP install -r test_requirements.txt pyinstaller
DURATIONS_FLAG="--durations 10"
if [ -n "$USE_DEBUG" ]; then
export PYTHONPATH=$PWD
export MYPYPATH=$PWD
fi
if [ -n "$RUN_COVERAGE" ]; then
COVERAGE_FLAG=--coverage
fi
# We change directories to make sure that python won't find the copy
# of numpy in the source directory.
mkdir -p empty
cd empty
INSTALLDIR=$($PYTHON -c \
"import os; import numpy; print(os.path.dirname(numpy.__file__))")
export PYTHONWARNINGS=default
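# PYTHONWARNINGS=default (set above) surfaces warnings that Python hides by default,
# e.g. DeprecationWarning, printing each one once per location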
# This guard protects against sudden, unexpected changes that may adversely affect the
# compile-time SIMD feature detection, which could leave the SIMD code inactive.
# So far we have faced two such cases:
#
# 1. Hardening the compile-time test files of the Neon/ASIMD features without checking
# the sanity of the modification, which led to disabling all optimizations on aarch64.
# see gh-21747
#
# 2. A sudden compiler upgrade on the CI side on s390x that conflicted with the
# installed assembler, disabling the whole VX/E feature set, which made us
# merge SIMD code without testing it. Later, it was discovered that this code
# disrupted the NumPy build.
# see gh-21750, gh-21748
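# EXPECT_CPU_FEATURES is assumed to be set by the CI config as a space-separated list of
# feature names, e.g. (hypothetical) EXPECT_CPU_FEATURES="SSE2 AVX2 AVX512F"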
if [ -n "$EXPECT_CPU_FEATURES" ]; then
as_expected=$($PYTHON << EOF
from numpy.core._multiarray_umath import (__cpu_baseline__, __cpu_dispatch__)
features = __cpu_baseline__ + __cpu_dispatch__
expected = '$EXPECT_CPU_FEATURES'.upper().split()
diff = set(expected).difference(set(features))
if diff:
    print("Unexpected compile-time CPU feature detection!\n"
          f"The current build is missing support for the CPU features '{diff}'.\n"
          "This error is triggered for one of the following reasons:\n\n"
          f"1. The compiler somehow no longer supports any of these CPU features: {diff}\n"
          f"Note that the current build reports support for the following features:\n{features}\n\n"
          "2. Your code messed up the testing process! Please check the build log, trace the\n"
          "compile-time feature tests, and make sure your patch has nothing to do with the test failures."
    )
EOF
)
if [ -n "$as_expected" ]; then
echo "$as_expected"
exit 1
fi
fi
if [ -n "$CHECK_BLAS" ]; then
$PYTHON -m pip install threadpoolctl
$PYTHON ../tools/openblas_support.py --check_version
fi
if [ -n "$RUN_FULL_TESTS" ]; then
# Travis has a limit on log length that is causing test failures.
# The fix here is to remove the "-v" from the runtest arguments.
export PYTHONWARNINGS="ignore::DeprecationWarning:virtualenv"
$PYTHON -b ../runtests.py -n --mode=full $DURATIONS_FLAG $COVERAGE_FLAG
else
$PYTHON ../runtests.py -n $DURATIONS_FLAG -- -rs
fi
if [ -n "$RUN_COVERAGE" ]; then
# move back up to the source dir because we want to execute
# gcov on the source files after the tests have gone through
# the code paths
cd ..
# execute gcov on source files
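# (-p preserves full path names in the generated .gcov files, -b adds branch statistics)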
find . -name '*.gcno' -type f -exec gcov -pb {} +
# move the C line coverage report files to the same path
# as the Python report data
mv *.gcov empty
# move back to the previous path for good measure
# as the Python coverage data is there
cd empty
# Upload coverage files to codecov
bash <(curl -s https://codecov.io/bash) -X gcov -X coveragepy
fi
if [ -n "$USE_ASV" ]; then
pushd ../benchmarks
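# 'asv check' verifies the benchmark suite imports cleanly without running the benchmarks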
$PYTHON `which asv` check --python=same
$PYTHON `which asv` machine --machine travis
$PYTHON `which asv` dev -q 2>&1| tee asv-output.log
if grep -q Traceback asv-output.log; then
echo "Some benchmarks have errors!"
exit 1
fi
popd
fi
}
export PYTHON
export PIP
if [ -n "$USE_WHEEL" ] && [ $# -eq 0 ]; then
# ensure some warnings are not issued
export CFLAGS=$CFLAGS" -Wno-sign-compare -Wno-unused-result"
# adjust gcc flags if C coverage requested
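# (gcc's '--coverage' is shorthand for '-fprofile-arcs -ftest-coverage' when compiling
# and '-lgcov' when linking)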
if [ -n "$RUN_COVERAGE" ]; then
export NPY_DISTUTILS_APPEND_FLAGS=1
export CC='gcc --coverage'
export F77='gfortran --coverage'
export F90='gfortran --coverage'
export LDFLAGS='--coverage'
fi
$PYTHON setup.py build --warn-error build_src --verbose-cfg bdist_wheel
# Make another virtualenv to install into
$PYTHON -m venv venv-for-wheel
. venv-for-wheel/bin/activate
# Move out of source directory to avoid finding local numpy
pushd dist
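# '--no-index --find-links=.' forces pip to install the freshly built wheel from dist/
# instead of downloading numpy from PyPI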
$PIP install --pre --no-index --upgrade --find-links=. numpy
popd
run_test
elif [ -n "$USE_SDIST" ] && [ $# -eq 0 ]; then
# temporary workaround for sdist failures.
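# (presumably resets stdout to blocking mode: F_SETFL with flags=0 clears O_NONBLOCK,
# so large writes during the sdist build don't fail)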
$PYTHON -c "import fcntl; fcntl.fcntl(1, fcntl.F_SETFL, 0)"
# ensure some warnings are not issued
export CFLAGS=$CFLAGS" -Wno-sign-compare -Wno-unused-result"
$PYTHON setup.py sdist
# Make another virtualenv to install into
$PYTHON -m venv venv-for-wheel
. venv-for-wheel/bin/activate
# Move out of source directory to avoid finding local numpy
pushd dist
$PIP install numpy*
popd
run_test
else
setup_base
run_test
fi