diff --git a/.github/workflows/test-action.yml b/.github/workflows/test-action.yml
index 5a1f111..f3ffee4 100644
--- a/.github/workflows/test-action.yml
+++ b/.github/workflows/test-action.yml
@@ -53,7 +53,7 @@ jobs:
           githubRepository: ${{ github.repository }}
           githubPullRequestNumber: ${{ github.event.pull_request.number }}
           gitCommitHash: ${{ github.event.pull_request.head.sha }}
-          repoId: "meta-llama/Llama-2-7b-chat-hf"
+          repoId: "codellama/CodeLlama-34b-Instruct-hf"
           temperature: "0.2"
           maxNewTokens: "250"
           topK: "50"
diff --git a/README.md b/README.md
index eadc4eb..596f8c3 100644
--- a/README.md
+++ b/README.md
@@ -69,7 +69,7 @@ jobs:
           githubRepository: ${{ github.repository }}
           githubPullRequestNumber: ${{ github.event.pull_request.number }}
           gitCommitHash: ${{ github.event.pull_request.head.sha }}
-          repoId: "meta-llama/Llama-2-7b-chat-hf"
+          repoId: "codellama/CodeLlama-34b-Instruct-hf"
           temperature: "0.2"
           maxNewTokens: "250"
           topK: "50"
diff --git a/action.yml b/action.yml
index d36b927..cda3312 100644
--- a/action.yml
+++ b/action.yml
@@ -23,7 +23,7 @@ inputs:
   repoId:
     description: "LLM model"
     required: true
-    default: "meta-llama/Llama-2-7b-chat-hf"
+    default: "codellama/CodeLlama-34b-Instruct-hf"
   maxNewTokens:
     description: "The number of new tokens to generate. This does not include the input length; it is an estimate of the size of the generated text you want. Each new token slows down the request, so look for a balance between response time and the length of the generated text."
     required: false
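For reference, a minimal sketch of a workflow that picks up the new default model. The trigger, job scaffolding, checkout step, and the local action reference (uses: ./) are assumptions for illustration; only the inputs under with: are taken from the diff above. Since the action's repoId input now defaults to codellama/CodeLlama-34b-Instruct-hf, passing it explicitly is optional.

# Hypothetical workflow excerpt; only the `with:` values mirror the diff.
on: pull_request

jobs:
  review:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: ./   # assumed local reference to this action
        with:
          githubRepository: ${{ github.repository }}
          githubPullRequestNumber: ${{ github.event.pull_request.number }}
          gitCommitHash: ${{ github.event.pull_request.head.sha }}
          # repoId may be omitted; it now defaults to the CodeLlama instruct model.
          repoId: "codellama/CodeLlama-34b-Instruct-hf"
          temperature: "0.2"
          maxNewTokens: "250"
          topK: "50"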