Skip to content

Commit

Permalink
feat: use OOTB ContentRetriever bean
Browse files Browse the repository at this point in the history
  • Loading branch information
rajadilipkolli committed Mar 27, 2024
1 parent fdb1bd8 commit 790a722
Show file tree
Hide file tree
Showing 2 changed files with 3 additions and 18 deletions.
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,6 @@
import dev.langchain4j.model.openai.OpenAiChatModelName;
import dev.langchain4j.model.openai.OpenAiTokenizer;
import dev.langchain4j.rag.content.retriever.ContentRetriever;
import dev.langchain4j.rag.content.retriever.EmbeddingStoreContentRetriever;
import dev.langchain4j.service.AiServices;
import dev.langchain4j.store.embedding.EmbeddingStore;
import dev.langchain4j.store.embedding.EmbeddingStoreIngestor;
Expand Down Expand Up @@ -56,23 +55,6 @@ ChatMemory chatMemory() {
// return TokenWindowChatMemory.withMaxTokens(1000, tokenizer);
// }

@Bean
ContentRetriever contentRetriever(EmbeddingStore<TextSegment> embeddingStore, EmbeddingModel embeddingModel) {

    // Retrieval tuning — both values should be adjusted experimentally; the
    // optimal setting depends on two main factors:
    // - The nature of your data
    // - The embedding model you are using
    final int topK = 1;                 // segments returned per query
    final double relevanceFloor = 0.6;  // minimum similarity score to accept

    return EmbeddingStoreContentRetriever.builder()
            .embeddingModel(embeddingModel)
            .embeddingStore(embeddingStore)
            .minScore(relevanceFloor)
            .maxResults(topK)
            .build();
}

@Bean
EmbeddingModel embeddingModel() {
return new AllMiniLmL6V2EmbeddingModel();
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,9 @@ langchain4j.open-ai.chat-model.timeout=PT60S
langchain4j.open-ai.chat-model.log-requests=true
langchain4j.open-ai.chat-model.log-responses=true

langchain4j.rag.retrieval.maxResults=1
langchain4j.rag.retrieval.minScore=0.6

logging.level.dev.langchain4j=INFO
logging.level.dev.ai4j.openai4j=INFO

Expand Down

0 comments on commit 790a722

Please sign in to comment.