Use generator args to group arguments to generator #161
Workflow file for this run
name: Run eager tests on MPS with dtypes

on:
  pull_request:
  push:
    branches:
      - main
  workflow_dispatch:

jobs:
  test-mps:
    uses: pytorch/test-infra/.github/workflows/macos_job.yml@main
    with:
      runner: macos-m1-stable
      script: |
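        # Fetch the stories15M checkpoint and run generate.py on MPS across dtypes and quantization configs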
        set -euo pipefail

        echo "::group::Print machine info"
        uname -a
        if [ "$(uname -s)" == Darwin ]; then
          sysctl machdep.cpu.brand_string
          sysctl machdep.cpu.core_count
        fi
        echo "::endgroup::"
echo "::group::Install requirements" | |
# Install requirements | |
pip install --pre torch torchvision torchaudio --index-url https://download.pytorch.org/whl/nightly/cpu | |
ls -la | |
pwd | |
pip install -r requirements.txt | |
echo "::endgroup::" | |
echo "::group::Download checkpoints" | |
( | |
mkdir -p checkpoints/stories15M | |
pushd checkpoints/stories15M | |
curl -fsSL -O https://huggingface.co/karpathy/tinyllamas/resolve/main/stories15M.pt | |
curl -fsSL -O https://github.com/karpathy/llama2.c/raw/master/tokenizer.model | |
popd | |
) | |
echo "::endgroup::" | |
echo "::group::Run inference" | |
export MODEL_PATH=checkpoints/stories15M/stories15M.pt | |
export MODEL_NAME=stories15M | |
export MODEL_DIR=/tmp | |
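        # Run the same set of generation commands in both float16 and float32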
        for DTYPE in float16 float32; do
          # if [ $(uname -s) == Darwin ]; then
          #   export DTYPE=float16
          # fi
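          # Eager baseline run, no quantization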
          python generate.py --dtype ${DTYPE} --device mps --checkpoint-path ${MODEL_PATH} --temperature 0 > ./output_eager
          cat ./output_eager
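          # 8-bit embedding quantization with groupsize 0 and groupsize 8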
          python generate.py --dtype ${DTYPE} --device mps --quant '{"embedding" : {"bitwidth": 8, "groupsize": 0}}' --checkpoint-path ${MODEL_PATH} --temperature 0 > ./output_eager
          cat ./output_eager
          python generate.py --dtype ${DTYPE} --device mps --quant '{"embedding" : {"bitwidth": 8, "groupsize": 8}}' --checkpoint-path ${MODEL_PATH} --temperature 0 > ./output_eager
          cat ./output_eager
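          # int8 linear quantization (linear:int8) with groupsize 0 and groupsize 8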
          python generate.py --dtype ${DTYPE} --device mps --quant '{"linear:int8" : {"bitwidth": 8, "groupsize": 0}}' --checkpoint-path ${MODEL_PATH} --temperature 0 > ./output_eager
          cat ./output_eager
          python generate.py --dtype ${DTYPE} --device mps --quant '{"linear:int8" : {"bitwidth": 8, "groupsize": 8}}' --checkpoint-path ${MODEL_PATH} --temperature 0 > ./output_eager
          cat ./output_eager
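          # int4 linear quantization (groupsize 32); PYTORCH_ENABLE_MPS_FALLBACK=1 lets ops without an MPS kernel fall back to CPU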
          PYTORCH_ENABLE_MPS_FALLBACK=1 python generate.py --dtype ${DTYPE} --device mps --quant '{"linear:int4" : {"groupsize": 32}}' --checkpoint-path ${MODEL_PATH} --temperature 0 > ./output_eager
          cat ./output_eager
        done
        echo "::endgroup::"