diff --git a/tests/test_torchserve.sh b/tests/test_torchserve.sh
index 6d3f7ed7..09a8f138 100755
--- a/tests/test_torchserve.sh
+++ b/tests/test_torchserve.sh
@@ -16,7 +16,7 @@ torch-model-archiver --force --model-name yolov8n --version 1.0 --serialized-fil
 rm -rf model_store && mkdir model_store
 mv yolov8n.mar model_store/
 # TODO: --runtime nvidia is required for Orin, --gpus all for x86
-docker run -v $(pwd)/model_store:/model_store -p 8080:8080 --rm --name testts --entrypoint timeout -d iqtlabs/torchserve:v0.0.4 180s /torchserve/torchserve-entrypoint.sh --models yolov8n=yolov8n.mar
+docker run -v $(pwd)/model_store:/model_store -p 8080:8080 --rm --name testts --entrypoint timeout -d iqtlabs/torchserve:v0.0.5 180s /torchserve/torchserve-entrypoint.sh --models yolov8n=yolov8n.mar
 PRED=$(wget -q --retry-connrefused --retry-on-host-error --body-file=persons.jpg --method=PUT -O- --header='Content-Type: image/jpg' http://127.0.0.1:8080/predictions/yolov8n | jq)
 echo $PRED
 if [ "$PRED" = "" ] ; then
diff --git a/torchserve-cuda.yml b/torchserve-cuda.yml
index 6f8a631a..15fcb3e0 100644
--- a/torchserve-cuda.yml
+++ b/torchserve-cuda.yml
@@ -4,7 +4,7 @@ networks:
 services:
   torchserve:
     restart: always
-    image: iqtlabs/cuda-torchserve:v0.0.4
+    image: iqtlabs/cuda-torchserve:v0.0.5
     networks:
       - gamutrf
     ports:
diff --git a/torchserve.yml b/torchserve.yml
index 7d2e31f8..d76ec958 100644
--- a/torchserve.yml
+++ b/torchserve.yml
@@ -4,7 +4,7 @@ networks:
 services:
   torchserve:
     restart: always
-    image: iqtlabs/torchserve:v0.0.4
+    image: iqtlabs/torchserve:v0.0.5
     networks:
       - gamutrf
     ports: