Skip to content

Commit

Permalink
add some basic run.sh to facilitate testing recipes, fix some configs
Browse files Browse the repository at this point in the history
  • Loading branch information
francoishernandez committed Sep 20, 2024
1 parent 00eed8b commit 955a27f
Show file tree
Hide file tree
Showing 8 changed files with 382 additions and 7 deletions.
7 changes: 7 additions & 0 deletions recipes/gpt2/run.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,7 @@
#!/usr/bin/env bash
# naive script with commands from the readme
# (useful to make sure the recipe still runs)
set -euo pipefail

# Fail early with a clear message if the required environment is missing.
: "${EOLE_MODEL_DIR:?EOLE_MODEL_DIR must be set}"
: "${HF_TOKEN:?HF_TOKEN must be set}"

# Download the HF checkpoint and convert it to eole's format.
eole convert HF --model_dir openai-community/gpt2 --output "${EOLE_MODEL_DIR}/openai_gpt2" --token "${HF_TOKEN}"
# printf instead of echo -e: the text has no escape sequences and printf is portable.
printf '%s\n' "The European Union was created in" > lm_input.txt
eole predict -c inference.yaml
eole tools eval_hellaswag -c inference.yaml
20 changes: 20 additions & 0 deletions recipes/llama2/run.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
#!/usr/bin/env bash
# naive script with commands from the readme
# (useful to make sure the recipe still runs)
set -euo pipefail

# Fail early with a clear message if the required environment is missing.
: "${EOLE_MODEL_DIR:?EOLE_MODEL_DIR must be set}"
: "${HF_TOKEN:?HF_TOKEN must be set}"

# Download the HF checkpoint and convert it to eole's format.
eole convert HF --model_dir meta-llama/Llama-2-7b-chat-hf --output "${EOLE_MODEL_DIR}/llama2-7b-chat-hf" --token "${HF_TOKEN}"
# Build a single-line prompt: the sed program joins all lines, replacing real
# newlines with the ⦅newline⦆ placeholder expected by eole's text input format.
printf '%s\n' "<s>[INST] <<SYS>>
You are a helpful, respectful and honest assistant. Always answer as helpfully as possible, while being safe. Your answers should not include any harmful, unethical, racist, sexist, toxic, dangerous, or illegal content. Please ensure that your responses are socially unbiased and positive in nature.
If a question does not make any sense, or is not factually coherent, explain why instead of answering something not correct. If you don't know the answer to a question, please don't share false information.
<</SYS>>
What are some nice places to visit in France? [/INST]" | sed ':a;N;$!ba;s/\n/⦅newline⦆/g' > test_prompt.txt
eole predict -c llama-inference.yaml -src test_prompt.txt -output test_output.txt
eole predict -c llama-inference-tp-2gpu.yaml -src test_prompt.txt -output test_output.txt
# mkdir -p is idempotent, replacing the [ ! -d ] && mkdir test-and-create.
mkdir -p ./data
# Fetch the three finetuning datasets referenced by llama-finetune.yaml.
# Alpaca
wget -P ./data https://opennmt-models.s3.amazonaws.com/llama/alpaca_clean.txt
# Vicuna
wget -P ./data https://opennmt-models.s3.amazonaws.com/llama/sharegpt.txt
# Open Assistant
wget -P ./data https://opennmt-models.s3.amazonaws.com/llama/osst1.flattened.txt
eole train -c llama-finetune.yaml
# Merge the LoRA weights produced by finetuning back into the base model.
eole model lora --action merge --base_model "${EOLE_MODEL_DIR}/llama2-7b-chat-hf" --lora_weights ./finetune/llama2-7b-chat-hf-finetune --output ./finetune/merged
1 change: 0 additions & 1 deletion recipes/llama3.1/llama-inference.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,6 @@ model_path: "${EOLE_MODEL_DIR}/llama3.1-8b"
seed: 42
max_length: 256
# max_length: 1
gpu: 0
batch_type: sents
batch_size: 4
world_size: 1
Expand Down
341 changes: 341 additions & 0 deletions recipes/llama3.1/run.sh

Large diffs are not rendered by default.

6 changes: 6 additions & 0 deletions recipes/llama3/run.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
#!/usr/bin/env bash
# naive script with commands from the readme
# (useful to make sure the recipe still runs)
set -euo pipefail

# Fail early with a clear message if the required environment is missing.
: "${EOLE_MODEL_DIR:?EOLE_MODEL_DIR must be set}"
: "${HF_TOKEN:?HF_TOKEN must be set}"

# Download the HF checkpoint and convert it to eole's format.
eole convert HF --model_dir meta-llama/Meta-Llama-3-8B-Instruct --output "${EOLE_MODEL_DIR}/llama3-8b-instruct" --token "${HF_TOKEN}"
# The sed program joins all lines, replacing real newlines with the
# ⦅newline⦆ placeholder expected by eole's text input format.
printf '%s\n' "What are some nice places to visit in France?" | sed ':a;N;$!ba;s/\n/⦅newline⦆/g' > test_prompt.txt
eole predict -c llama-inference.yaml -src test_prompt.txt -output test_output.txt
2 changes: 1 addition & 1 deletion recipes/mistral/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@ export HF_TOKEN=<your_hf_token>
### Download and convert model

```
eole convert HF --model_dir TheBloke/Mistral-7B-Instruct-v0.2-AWQ --output ${EOLE_MODEL_DIR}/mistral-7b-instruct-v0.2-awq --token $HF_TOKEN
eole convert HF --model_dir mistralai/Mistral-7B-v0.3 --output ${EOLE_MODEL_DIR}/mistral-7b-v0.3 --token $HF_TOKEN
```


Expand Down
9 changes: 4 additions & 5 deletions recipes/mistral/mistral-7b-awq-gemm-inference.yaml
Original file line number Diff line number Diff line change
@@ -1,11 +1,11 @@
transforms: [sentencepiece]
transforms_configs:
sentencepiece:
src_subword_model: "$EOLE_MODEL_DIR/mistral-7b-v0.3/tokenizer.model"
tgt_subword_model: "$EOLE_MODEL_DIR/mistral-7b-v0.3/tokenizer.model"
src_subword_model: "$EOLE_MODEL_DIR/mistral-7b-instruct-v0.2-awq/tokenizer.model"
tgt_subword_model: "$EOLE_MODEL_DIR/mistral-7b-instruct-v0.2-awq/tokenizer.model"

# Model info
model_path: "$EOLE_MODEL_DIR/mistral-7b-v0.3"
model_path: "$EOLE_MODEL_DIR/mistral-7b-instruct-v0.2-awq"

# Inference
seed: 42
Expand All @@ -19,8 +19,7 @@ gpu_ranks: [0]
# parallel_mode: "tensor_parallel"
#quant_layers: ['gate_up_proj', 'down_proj', 'up_proj', 'linear_values', 'linear_query', 'linear_keys', 'final_linear']
#quant_type: "bnb_NF4"
# precision: fp16
precision: fp16
compute_dtype: fp16
#top_k: 1
#top_p: 0.6
#temperature: 0.9
Expand Down
3 changes: 3 additions & 0 deletions recipes/mistral/run.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
#!/usr/bin/env bash
# naive script with commands from the readme
# (useful to make sure the recipe still runs)
set -euo pipefail

# Fail early with a clear message if the required environment is missing.
: "${EOLE_MODEL_DIR:?EOLE_MODEL_DIR must be set}"
: "${HF_TOKEN:?HF_TOKEN must be set}"

# Download the HF checkpoint and convert it to eole's format.
eole convert HF --model_dir mistralai/Mistral-7B-v0.3 --output "${EOLE_MODEL_DIR}/mistral-7b-v0.3" --token "${HF_TOKEN}"
# The sed program joins all lines, replacing real newlines with the
# ⦅newline⦆ placeholder expected by eole's text input format.
printf '%s\n' "What are some nice places to visit in France?" | sed ':a;N;$!ba;s/\n/⦅newline⦆/g' > test_prompt.txt
eole predict -c mistral-7b-awq-gemm-inference.yaml -src test_prompt.txt -output test_output.txt

0 comments on commit 955a27f

Please sign in to comment.