name: Self-hosted runner (push)

on:
  push:
    branches:
      - master
    paths:
      - "src/**"
      - "tests/**"
      - ".github/**"
  # pull_request:
  repository_dispatch:

jobs:
  run_tests_torch_and_tf_gpu:
    runs-on: self-hosted
    steps:
      - uses: actions/checkout@v2

      - name: Python version
        run: |
          which python
          python --version
          pip --version

      - name: Current dir
        run: pwd

      - run: nvidia-smi

      # Cache the virtualenv, keyed on setup.py, so dependencies are reused
      # across runs and only reinstalled when the requirements change.
      - name: Loading cache
        uses: actions/cache@v2
        id: cache
        with:
          path: .env
          key: v0-tests_tf_torch_gpu-${{ hashFiles('setup.py') }}

      - name: Create new python env (on self-hosted runners we have to handle isolation ourselves)
        run: |
          python -m venv .env
          source .env/bin/activate
          which python
          python --version
          pip --version

      - name: Install dependencies
        run: |
          source .env/bin/activate
          pip install --upgrade pip
          pip install torch!=1.6.0
          pip install .[sklearn,testing,onnxruntime]
          pip install git+https://github.com/huggingface/nlp

      - name: Are GPUs recognized by our DL frameworks
        run: |
          source .env/bin/activate
          # Only torch is checked here, since TensorFlow is not installed in this env.
          python -c "import torch; print(torch.cuda.is_available())"

      - name: Run all non-slow tests on GPU
        env:
          # Let TensorFlow allocate GPU memory on demand instead of claiming it all up front.
          TF_FORCE_GPU_ALLOW_GROWTH: "true"
          # TF_GPU_MEMORY_LIMIT: 4096
          # Limit each process to a single OpenMP thread to avoid oversubscription
          # when pytest runs parallel workers.
          OMP_NUM_THREADS: 1
          USE_CUDA: yes
        run: |
          source .env/bin/activate
          # pytest-xdist: 2 parallel workers; --dist=loadfile keeps all tests
          # from the same file on the same worker.
          python -m pytest -n 2 --dist=loadfile -s ./tests/