diff --git a/README.md b/README.md index ce318e9c5..a7cadbb1f 100644 --- a/README.md +++ b/README.md @@ -42,7 +42,7 @@ PINECONE_ENVIRONMENT= 4. In the `config` folder, replace the `PINECONE_INDEX_NAME` and `PINECONE_NAME_SPACE` with your own details from your pinecone dashboard. -5. In `utils/makechain.ts` chain change the `QA_PROMPT` for your own usecase. Change `modelName` in `new OpenAIChat` to a different api model if you don't have access to `gpt-4`. +5. In `utils/makechain.ts` change the `QA_PROMPT` for your own use case. Change `modelName` in `new OpenAIChat` to a different API model if you don't have access to `gpt-4`. See [the OpenAI docs](https://platform.openai.com/docs/models/model-endpoint-compatibility) for a list of supported `modelName`s. For example, you could use `gpt-3.5-turbo` if you do not yet have access to `gpt-4`. ## Convert your PDF to embeddings diff --git a/utils/makechain.ts b/utils/makechain.ts index e238c2cab..9eb59916d 100644 --- a/utils/makechain.ts +++ b/utils/makechain.ts @@ -36,7 +36,7 @@ export const makeChain = ( const docChain = loadQAChain( new OpenAIChat({ temperature: 0, - modelName: 'gpt-4', //change this to older versions if you don't have access to gpt-4 + modelName: 'gpt-4', //change this to a different model (e.g. gpt-3.5-turbo) if you don't have access to gpt-4 streaming: Boolean(onTokenStream), callbackManager: onTokenStream ? CallbackManager.fromHandlers({