From afe8e564b3eec6a8399a0d4660f19a4226069ad8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=C5=81ukasz=20Ole=C5=9B?=
Date: Tue, 31 Oct 2023 17:22:31 +0100
Subject: [PATCH] Fix CI failing tests (#416)

* Use auto mode for asyncio tests
* Drop testing python 3.8
* Set build.os
* Set build options for readthedocs
* Switch everywhere to 3.9
---
 .github/workflows/ci.yml                 |  4 +-
 .github/workflows/release.yml            |  4 +-
 .readthedocs.yml                         |  6 ++-
 ci/environment-3.8.yml                   | 38 ------------------------------
 ci/scripts/test_imports.sh               |  4 +-
 dask_cloudprovider/aws/tests/test_ec2.py |  6 +--
 dask_cloudprovider/azure/azurevm.py      |  2 +-
 .../azure/tests/test_azurevm.py          |  2 +-
 dask_cloudprovider/gcp/tests/test_gcp.py |  4 +-
 doc/source/packer.rst                    |  8 ++--
 pytest.ini                               |  2 +
 setup.py                                 |  2 +-
 12 files changed, 25 insertions(+), 57 deletions(-)
 delete mode 100644 ci/environment-3.8.yml
 create mode 100644 pytest.ini

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 0b87349a..f309a92e 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -9,7 +9,7 @@ jobs:
       fail-fast: true
       matrix:
         os: ["ubuntu-latest"]
-        python-version: ["3.8", "3.9", "3.10"]
+        python-version: ["3.9", "3.10"]

     steps:
       - name: Checkout source
@@ -50,7 +50,7 @@ jobs:
         uses: conda-incubator/setup-miniconda@v2
         with:
           miniconda-version: "latest"
-          python-version: "3.8"
+          python-version: "3.9"

       - name: Run import tests
         shell: bash -l {0}
diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml
index 10b34b50..30fd3238 100644
--- a/.github/workflows/release.yml
+++ b/.github/workflows/release.yml
@@ -10,10 +10,10 @@ jobs:
       - name: Checkout source
         uses: actions/checkout@v2

-      - name: Set up Python 3.8
+      - name: Set up Python 3.9
        uses: actions/setup-python@v1
        with:
-          python-version: 3.8
+          python-version: 3.9

      - name: Install pypa/build
        run: python -m pip install build wheel
diff --git a/.readthedocs.yml b/.readthedocs.yml
index d11ea093..1285a81e 100644
--- a/.readthedocs.yml
+++ b/.readthedocs.yml
@@ -6,7 +6,6 @@ sphinx:
 formats: all

 python:
-  version: "3.8"
   install:
     - method: pip
       path: .
@@ -16,3 +15,8 @@ python:

 submodules:
   include: all
+
+build:
+  os: ubuntu-22.04
+  tools:
+    python: "3"
diff --git a/ci/environment-3.8.yml b/ci/environment-3.8.yml
deleted file mode 100644
index d25e912d..00000000
--- a/ci/environment-3.8.yml
+++ /dev/null
@@ -1,38 +0,0 @@
-name: dask-cloudprovider-test
-channels:
-  - defaults
-  - conda-forge
-dependencies:
-  - python=3.8
-  - nomkl
-  - pip
-  # Dask
-  - dask
-  # testing / CI
-  - flake8
-  - ipywidgets
-  - pytest
-  - pytest-asyncio
-  - black >=20.8b1
-  - pyyaml
-  # dask dependencies
-  - cloudpickle
-  - toolz
-  - cytoolz
-  - numpy
-  - partd
-  # distributed dependencies
-  - click >=6.6
-  - msgpack-python
-  - psutil >=5.0
-  - six
-  - sortedcontainers !=2.0.0,!=2.0.1
-  - tblib
-  - tornado >=5
-  - zict >=0.1.3
-  # `event_loop_policy` change See https://github.com/dask/distributed/pull/4212
-  - pytest-asyncio >=0.14.0
-  - pytest-timeout
-  - pip:
-      - git+https://github.com/dask/dask.git@main
-      - git+https://github.com/dask/distributed@main
diff --git a/ci/scripts/test_imports.sh b/ci/scripts/test_imports.sh
index c50a56bf..16a4f652 100644
--- a/ci/scripts/test_imports.sh
+++ b/ci/scripts/test_imports.sh
@@ -3,9 +3,9 @@
 set -o errexit

 test_import () {
-    echo "Create environment: python=3.8 $1"
+    echo "Create environment: python=3.9 $1"
     # Create an empty environment
-    conda create -q -y -n test-imports -c conda-forge python=3.8
+    conda create -q -y -n test-imports -c conda-forge python=3.9
     conda activate test-imports
     pip install -e .[$1]
     echo "python -c '$2'"
diff --git a/dask_cloudprovider/aws/tests/test_ec2.py b/dask_cloudprovider/aws/tests/test_ec2.py
index eef9cead..496ed073 100644
--- a/dask_cloudprovider/aws/tests/test_ec2.py
+++ b/dask_cloudprovider/aws/tests/test_ec2.py
@@ -48,7 +48,7 @@ async def cluster_rapids():
         # Deep Learning AMI (Ubuntu 18.04)
         ami="ami-0c7c7d78f752f8f17",
         # Python version must match local version and CUDA version must match AMI CUDA version
-        docker_image="rapidsai/rapidsai:cuda10.1-runtime-ubuntu18.04-py3.8",
+        docker_image="rapidsai/rapidsai:cuda10.1-runtime-ubuntu18.04-py3.9",
         instance_type="p3.2xlarge",
         bootstrap=False,
         filesystem_size=120,
@@ -65,7 +65,7 @@ async def cluster_rapids_packer():
         # Packer AMI
         ami="ami-04e5539cb82859e69",
         # Python version must match local version and CUDA version must match AMI CUDA version
-        docker_image="rapidsai/rapidsai:cuda10.1-runtime-ubuntu18.04-py3.8",
+        docker_image="rapidsai/rapidsai:cuda10.1-runtime-ubuntu18.04-py3.9",
         instance_type="p3.2xlarge",
         bootstrap=False,
         filesystem_size=120,
@@ -202,7 +202,7 @@ async def test_get_cloud_init_rapids():
         # Deep Learning AMI (Ubuntu 18.04)
         ami="ami-0c7c7d78f752f8f17",
         # Python version must match local version and CUDA version must match AMI CUDA version
-        docker_image="rapidsai/rapidsai:cuda10.1-runtime-ubuntu18.04-py3.8",
+        docker_image="rapidsai/rapidsai:cuda10.1-runtime-ubuntu18.04-py3.9",
         instance_type="p3.2xlarge",
         bootstrap=False,
         filesystem_size=120,
diff --git a/dask_cloudprovider/azure/azurevm.py b/dask_cloudprovider/azure/azurevm.py
index a2accbb0..0d09f7a1 100644
--- a/dask_cloudprovider/azure/azurevm.py
+++ b/dask_cloudprovider/azure/azurevm.py
@@ -433,7 +433,7 @@ class AzureVMCluster(VMCluster):
     ...     security_group="",
     ...     n_workers=1,
     ...     vm_size="Standard_NC12s_v3",  # Or any NVIDIA GPU enabled size
-    ...     docker_image="rapidsai/rapidsai:cuda11.0-runtime-ubuntu18.04-py3.8",
+    ...     docker_image="rapidsai/rapidsai:cuda11.0-runtime-ubuntu18.04-py3.9",
     ...     worker_class="dask_cuda.CUDAWorker")
     >>> from dask.distributed import Client
     >>> client = Client(cluster)
diff --git a/dask_cloudprovider/azure/tests/test_azurevm.py b/dask_cloudprovider/azure/tests/test_azurevm.py
index 7c7439f7..20aab38c 100644
--- a/dask_cloudprovider/azure/tests/test_azurevm.py
+++ b/dask_cloudprovider/azure/tests/test_azurevm.py
@@ -87,7 +87,7 @@ async def test_create_rapids_cluster_sync():

     with AzureVMCluster(
         vm_size="Standard_NC12s_v3",
-        docker_image="rapidsai/rapidsai:cuda11.0-runtime-ubuntu18.04-py3.8",
+        docker_image="rapidsai/rapidsai:cuda11.0-runtime-ubuntu18.04-py3.9",
         worker_class="dask_cuda.CUDAWorker",
         worker_options={"rmm_pool_size": "15GB"},
     ) as cluster:
diff --git a/dask_cloudprovider/gcp/tests/test_gcp.py b/dask_cloudprovider/gcp/tests/test_gcp.py
index 1e017ca7..915f5d88 100644
--- a/dask_cloudprovider/gcp/tests/test_gcp.py
+++ b/dask_cloudprovider/gcp/tests/test_gcp.py
@@ -125,7 +125,7 @@ async def test_create_rapids_cluster():
         filesystem_size=50,
         ngpus=2,
         gpu_type="nvidia-tesla-t4",
-        docker_image="rapidsai/rapidsai:cuda11.0-runtime-ubuntu18.04-py3.8",
+        docker_image="rapidsai/rapidsai:cuda11.0-runtime-ubuntu18.04-py3.9",
         worker_class="dask_cuda.CUDAWorker",
         worker_options={"rmm_pool_size": "15GB"},
         asynchronous=True,
@@ -168,7 +168,7 @@ def test_create_rapids_cluster_sync():
         filesystem_size=50,
         ngpus=2,
         gpu_type="nvidia-tesla-t4",
-        docker_image="rapidsai/rapidsai:cuda11.0-runtime-ubuntu18.04-py3.8",
+        docker_image="rapidsai/rapidsai:cuda11.0-runtime-ubuntu18.04-py3.9",
         worker_class="dask_cuda.CUDAWorker",
         worker_options={"rmm_pool_size": "15GB"},
         asynchronous=False,
diff --git a/doc/source/packer.rst b/doc/source/packer.rst
index b5ea698e..0ad7445a 100644
--- a/doc/source/packer.rst
+++ b/doc/source/packer.rst
@@ -218,7 +218,7 @@ To launch `RAPIDS <https://rapids.ai/>`_ on AWS EC2 we can select a GPU instance

     cluster = EC2Cluster(
         ami="ami-0c7c7d78f752f8f17",  # Deep Learning AMI (this ID varies by region so find yours in the AWS Console)
-        docker_image="rapidsai/rapidsai:cuda10.1-runtime-ubuntu18.04-py3.8",
+        docker_image="rapidsai/rapidsai:cuda10.1-runtime-ubuntu18.04-py3.9",
         instance_type="p3.2xlarge",
         bootstrap=False,  # Docker is already installed on the Deep Learning AMI
         filesystem_size=120,
@@ -263,7 +263,7 @@ pull the RAPIDS Docker image. That way when a scheduler or worker VM is created
        {
            "type": "shell",
            "inline": [
-               "docker pull rapidsai/rapidsai:cuda10.1-runtime-ubuntu18.04-py3.8"
+               "docker pull rapidsai/rapidsai:cuda10.1-runtime-ubuntu18.04-py3.9"
            ]
        }
    ]
@@ -315,7 +315,7 @@ We can then run our code snippet again but this time it will take less than 5 mi

     cluster = EC2Cluster(
         ami="ami-04e5539cb82859e69",  # AMI ID provided by Packer
-        docker_image="rapidsai/rapidsai:cuda10.1-runtime-ubuntu18.04-py3.8",
+        docker_image="rapidsai/rapidsai:cuda10.1-runtime-ubuntu18.04-py3.9",
         instance_type="p3.2xlarge",
         bootstrap=False,
         filesystem_size=120,
@@ -323,4 +323,4 @@ We can then run our code snippet again but this time it will take less than 5 mi
     cluster.scale(2)

     client = Client(cluster)
-    # Your cluster is ready to use
\ No newline at end of file
+    # Your cluster is ready to use
diff --git a/pytest.ini b/pytest.ini
new file mode 100644
index 00000000..2f4c80e3
--- /dev/null
+++ b/pytest.ini
@@ -0,0 +1,2 @@
+[pytest]
+asyncio_mode = auto
diff --git a/setup.py b/setup.py
index 7d4e96ca..64ca18a5 100644
--- a/setup.py
+++ b/setup.py
@@ -37,5 +37,5 @@
     [console_scripts]
     dask-ecs=dask_cloudprovider.cli.ecs:go
     """,
-    python_requires=">=3.8",
+    python_requires=">=3.9",
)
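
Note on the new pytest.ini: setting pytest-asyncio to auto mode makes pytest collect every async def test function as an asyncio test and run it on an event loop, so no explicit @pytest.mark.asyncio marker is required. The snippet below is a minimal illustrative sketch of that behaviour only; the module and test names are hypothetical and are not part of this patch. It assumes pytest and pytest-asyncio are installed alongside the pytest.ini added above.

    # test_example.py -- hypothetical module, not included in this patch.
    import asyncio

    # No @pytest.mark.asyncio marker is needed: with asyncio_mode = auto,
    # pytest-asyncio detects the coroutine function and runs it to completion
    # on an event loop.
    async def test_sleep_returns_none():
        result = await asyncio.sleep(0)
        assert result is None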