diff --git a/chatmodel-springai/pom.xml b/chatmodel-springai/pom.xml
index d37a31f..a82ae51 100644
--- a/chatmodel-springai/pom.xml
+++ b/chatmodel-springai/pom.xml
@@ -35,6 +35,11 @@
             <groupId>org.apache.httpcomponents.client5</groupId>
             <artifactId>httpclient5</artifactId>
         </dependency>
+        <dependency>
+            <groupId>org.springdoc</groupId>
+            <artifactId>springdoc-openapi-starter-webmvc-ui</artifactId>
+            <version>2.4.0</version>
+        </dependency>
         <dependency>
             <groupId>org.springframework.boot</groupId>
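
Adding springdoc-openapi-starter-webmvc-ui is enough on its own: it auto-exposes Swagger UI at /swagger-ui.html and the OpenAPI document at /v3/api-docs. If the module wants to customize the API metadata, the usual extension point is an OpenAPI bean like the sketch below (the title, description, and version strings are illustrative, not taken from this repository):

```java
import io.swagger.v3.oas.models.OpenAPI;
import io.swagger.v3.oas.models.info.Info;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration(proxyBeanMethods = false)
class OpenApiConfig {

    // springdoc picks up any OpenAPI bean and merges it into the generated /v3/api-docs document
    @Bean
    OpenAPI chatModelOpenApi() {
        return new OpenAPI()
                .info(new Info()
                        .title("chatmodel-springai API")
                        .description("Chat endpoints backed by Spring AI")
                        .version("v1"));
    }
}
```
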
diff --git a/chatmodel-springai/src/main/java/com/example/ai/config/LoggingConfig.java b/chatmodel-springai/src/main/java/com/example/ai/config/LoggingConfig.java
index e3d18f7..9017df6 100644
--- a/chatmodel-springai/src/main/java/com/example/ai/config/LoggingConfig.java
+++ b/chatmodel-springai/src/main/java/com/example/ai/config/LoggingConfig.java
@@ -21,7 +21,7 @@
@ConditionalOnProperty(value = "spring.ai.openai.api-key", havingValue = "demo")
public class LoggingConfig {
- private final Logger log = LoggerFactory.getLogger(LoggingConfig.class);
+ private static final Logger LOGGER = LoggerFactory.getLogger(LoggingConfig.class);
@Bean
RestClient.Builder restClientBuilder() {
@@ -40,21 +40,21 @@ RestClient.Builder restClientBuilder() {
}
private void logResponse(ClientHttpResponse response) throws IOException {
- log.info("============================response begin==========================================");
- log.info("Status code : {}", response.getStatusCode());
- log.info("Status text : {}", response.getStatusText());
- log.info("Headers : {}", response.getHeaders());
- log.info("Response body: {}", StreamUtils.copyToString(response.getBody(), Charset.defaultCharset()));
- log.info("=======================response end=================================================");
+ LOGGER.info("============================response begin==========================================");
+ LOGGER.info("Status code : {}", response.getStatusCode());
+ LOGGER.info("Status text : {}", response.getStatusText());
+ LOGGER.info("Headers : {}", response.getHeaders());
+ LOGGER.info("Response body: {}", StreamUtils.copyToString(response.getBody(), Charset.defaultCharset()));
+ LOGGER.info("=======================response end=================================================");
}
private void logRequest(HttpRequest request, byte[] body) {
- log.info("===========================request begin================================================");
- log.info("URI : {}", request.getURI());
- log.info("Method : {}", request.getMethod());
- log.info("Headers : {}", request.getHeaders());
- log.info("Request body: {}", new String(body, StandardCharsets.UTF_8));
- log.info("==========================request end================================================");
+ LOGGER.info("===========================request begin================================================");
+ LOGGER.info("URI : {}", request.getURI());
+ LOGGER.info("Method : {}", request.getMethod());
+ LOGGER.info("Headers : {}", request.getHeaders());
+ LOGGER.info("Request body: {}", new String(body, StandardCharsets.UTF_8));
+ LOGGER.info("==========================request end================================================");
}
}
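
For context on the logging changes: logResponse can only read the response body if the underlying request factory buffers it, otherwise StreamUtils.copyToString would consume the stream before the message converters see it. The restClientBuilder() body is elided in the hunk above; a minimal sketch of how the two helpers are typically wired, written as a fragment of LoggingConfig and assuming Spring Framework 6.1's RestClient with the plain SimpleClientHttpRequestFactory (the project may well use the HttpComponents factory instead, given its httpclient5 dependency):

```java
@Bean
RestClient.Builder restClientBuilder() {
    return RestClient.builder()
            // Buffering lets logResponse read the body without exhausting the stream
            // that the message converters still need afterwards.
            .requestFactory(new BufferingClientHttpRequestFactory(new SimpleClientHttpRequestFactory()))
            .requestInterceptor((request, body, execution) -> {
                logRequest(request, body);
                ClientHttpResponse response = execution.execute(request, body);
                logResponse(response);
                return response;
            });
}
```
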
diff --git a/chatmodel-springai/src/main/resources/application.properties b/chatmodel-springai/src/main/resources/application.properties
index f44305f..e11ba56 100644
--- a/chatmodel-springai/src/main/resources/application.properties
+++ b/chatmodel-springai/src/main/resources/application.properties
@@ -3,7 +3,7 @@ spring.threads.virtual.enabled=true
spring.ai.openai.api-key=demo
spring.ai.openai.base-url=http://langchain4j.dev/demo/openai
spring.ai.openai.chat.options.model=gpt-3.5-turbo
-spring.ai.openai.chat.options.temperature=0.7
+spring.ai.openai.chat.options.temperature=0.2
spring.ai.openai.chat.options.responseFormat=json_object
spring.ai.openai.embedding.enabled=false
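
Dropping the temperature from 0.7 to 0.2 makes completions more deterministic, which pairs well with responseFormat=json_object. The same option can also be overridden per request rather than globally; a hedged sketch, assuming the Spring AI 0.8.x API (ChatClient.call(Prompt) and OpenAiChatOptions.builder().withTemperature(Float); method names have shifted in later milestones):

```java
// chatClient is the auto-configured OpenAI ChatClient (Spring AI 0.8.x naming)
ChatResponse response = chatClient.call(new Prompt(
        "Summarize the booking policy in two sentences",
        OpenAiChatOptions.builder()
                .withTemperature(0.2f) // request-level override of the property above
                .build()));
String answer = response.getResult().getOutput().getContent();
```
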
diff --git a/rag-langchain4j-AllMiniLmL6V2-llm/src/main/java/com/learning/ai/config/AIConfig.java b/rag-langchain4j-AllMiniLmL6V2-llm/src/main/java/com/learning/ai/config/AIConfig.java
index c2a9289..b5ae911 100644
--- a/rag-langchain4j-AllMiniLmL6V2-llm/src/main/java/com/learning/ai/config/AIConfig.java
+++ b/rag-langchain4j-AllMiniLmL6V2-llm/src/main/java/com/learning/ai/config/AIConfig.java
@@ -2,6 +2,7 @@
import static dev.langchain4j.data.document.loader.FileSystemDocumentLoader.loadDocument;
+import com.learning.ai.service.AICustomerSupportAgent;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.DocumentSplitter;
import dev.langchain4j.data.document.parser.apache.pdfbox.ApachePdfBoxDocumentParser;
@@ -59,9 +60,17 @@ EmbeddingModel embeddingModel() {
return new AllMiniLmL6V2EmbeddingModel();
}
+ @Bean
+ OpenAiTokenizer openAiTokenizer() {
+ return new OpenAiTokenizer(OpenAiChatModelName.GPT_3_5_TURBO.toString());
+ }
+
@Bean
EmbeddingStore embeddingStore(
- EmbeddingModel embeddingModel, ResourceLoader resourceLoader, JdbcConnectionDetails jdbcConnectionDetails)
+ EmbeddingModel embeddingModel,
+ ResourceLoader resourceLoader,
+ JdbcConnectionDetails jdbcConnectionDetails,
+ OpenAiTokenizer openAiTokenizer)
throws IOException {
// Normally, you would already have your embedding store filled with your data.
@@ -81,6 +90,7 @@ EmbeddingStore embeddingStore(
.password(jdbcConnectionDetails.getPassword())
.database(path.substring(1))
.table("ai_vector_store")
+ .dropTableFirst(true)
.dimension(384)
.build();
@@ -97,8 +107,7 @@ EmbeddingStore embeddingStore(
// 4. Convert segments into embeddings
// 5. Store embeddings into embedding store
// All this can be done manually, but we will use EmbeddingStoreIngestor to automate this:
- DocumentSplitter documentSplitter =
- DocumentSplitters.recursive(500, 0, new OpenAiTokenizer(OpenAiChatModelName.GPT_3_5_TURBO.toString()));
+ DocumentSplitter documentSplitter = DocumentSplitters.recursive(500, 0, openAiTokenizer);
EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder()
.documentSplitter(documentSplitter)
.embeddingModel(embeddingModel)
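
Extracting OpenAiTokenizer into its own bean does not change behaviour; it only makes the splitter configuration injectable and easier to test. As a rough illustration of what DocumentSplitters.recursive(500, 0, tokenizer) produces (a sketch, with made-up sample text):

```java
Tokenizer tokenizer = new OpenAiTokenizer(OpenAiChatModelName.GPT_3_5_TURBO.toString());
DocumentSplitter splitter = DocumentSplitters.recursive(500, 0, tokenizer);

// Each segment holds at most 500 tokens as counted by the GPT-3.5 tokenizer, with no overlap
// between consecutive segments.
Document document = Document.from("Terms and conditions ... (text extracted from the PDF)");
List<TextSegment> segments = splitter.split(document);
segments.forEach(segment -> System.out.println(tokenizer.estimateTokenCountInText(segment.text())));
```
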
diff --git a/rag-langchain4j-AllMiniLmL6V2-llm/src/main/java/com/learning/ai/config/ChatTools.java b/rag-langchain4j-AllMiniLmL6V2-llm/src/main/java/com/learning/ai/config/ChatTools.java
index bf500e2..b7e398e 100644
--- a/rag-langchain4j-AllMiniLmL6V2-llm/src/main/java/com/learning/ai/config/ChatTools.java
+++ b/rag-langchain4j-AllMiniLmL6V2-llm/src/main/java/com/learning/ai/config/ChatTools.java
@@ -12,7 +12,7 @@ public class ChatTools {
/**
* This tool is available to {@link AICustomerSupportAgent}
*/
- @Tool("chatAssistantTools")
+ @Tool(name = "currentTime", value = "the current time is")
String currentTime() {
log.info("Inside ChatTools");
return LocalTime.now().toString();
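
With the explicit name and value, the tool specification advertised to the model is named currentTime and carries "the current time is" as its description, rather than being exposed under the old free-text string. One way to inspect this, a sketch using langchain4j's ToolSpecifications helper:

```java
List<ToolSpecification> specifications = ToolSpecifications.toolSpecificationsFrom(new ChatTools());
ToolSpecification currentTime = specifications.get(0);
System.out.println(currentTime.name());        // currentTime
System.out.println(currentTime.description()); // the current time is
```
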
diff --git a/rag-langchain4j-AllMiniLmL6V2-llm/src/main/java/com/learning/ai/config/AICustomerSupportAgent.java b/rag-langchain4j-AllMiniLmL6V2-llm/src/main/java/com/learning/ai/service/AICustomerSupportAgent.java
similarity index 70%
rename from rag-langchain4j-AllMiniLmL6V2-llm/src/main/java/com/learning/ai/config/AICustomerSupportAgent.java
rename to rag-langchain4j-AllMiniLmL6V2-llm/src/main/java/com/learning/ai/service/AICustomerSupportAgent.java
index f941256..e62c34b 100644
--- a/rag-langchain4j-AllMiniLmL6V2-llm/src/main/java/com/learning/ai/config/AICustomerSupportAgent.java
+++ b/rag-langchain4j-AllMiniLmL6V2-llm/src/main/java/com/learning/ai/service/AICustomerSupportAgent.java
@@ -1,14 +1,16 @@
-package com.learning.ai.config;
+package com.learning.ai.service;
import com.learning.ai.domain.response.AICustomerSupportResponse;
import dev.langchain4j.service.UserMessage;
import dev.langchain4j.service.V;
+import dev.langchain4j.service.spring.AiService;
+@AiService(tools = "currentTime")
public interface AICustomerSupportAgent {
@UserMessage({
"""
- Tell me about {{question}}? as of {{current_date}}
+ Tell me about {{question}}? as of {{current_date_time}}
Use the following information to answer the question:
{{information}}
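
The @AiService annotation from the langchain4j Spring Boot starter builds this agent automatically from beans in the context, with tools = "currentTime" referring to the tool above and {{current_date_time}} being one of the template variables the library fills in by itself. For readers unfamiliar with the starter, a hedged sketch of the roughly equivalent manual wiring in plain langchain4j (the chatLanguageModel variable is assumed to be any configured ChatLanguageModel):

```java
AICustomerSupportAgent agent = AiServices.builder(AICustomerSupportAgent.class)
        .chatLanguageModel(chatLanguageModel) // any dev.langchain4j ChatLanguageModel bean
        .tools(new ChatTools())               // makes the currentTime tool callable by the model
        .build();
```
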
diff --git a/rag-langchain4j-AllMiniLmL6V2-llm/src/main/java/com/learning/ai/service/CustomerSupportService.java b/rag-langchain4j-AllMiniLmL6V2-llm/src/main/java/com/learning/ai/service/CustomerSupportService.java
index ef1fe2a..7b1e4d1 100644
--- a/rag-langchain4j-AllMiniLmL6V2-llm/src/main/java/com/learning/ai/service/CustomerSupportService.java
+++ b/rag-langchain4j-AllMiniLmL6V2-llm/src/main/java/com/learning/ai/service/CustomerSupportService.java
@@ -1,6 +1,5 @@
package com.learning.ai.service;
-import com.learning.ai.config.AICustomerSupportAgent;
import com.learning.ai.domain.response.AICustomerSupportResponse;
import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.data.segment.TextSegment;
diff --git a/rag-springai-ollama-llm/docker/docker-compose.yml b/rag-springai-ollama-llm/docker/docker-compose.yml
new file mode 100644
index 0000000..8a9782f
--- /dev/null
+++ b/rag-springai-ollama-llm/docker/docker-compose.yml
@@ -0,0 +1,10 @@
+version: '3.7'
+services:
+ ollama:
+ image: langchain4j/ollama-orca-mini:latest
+ ports:
+ - '11434:11434'
+ redis-stack:
+ image: redis/redis-stack-server
+ ports:
+ - '6379:6379'
\ No newline at end of file
diff --git a/rag-springai-openai-llm/docker/docker-compose.yml b/rag-springai-openai-llm/docker/docker-compose.yml
new file mode 100644
index 0000000..6519d8d
--- /dev/null
+++ b/rag-springai-openai-llm/docker/docker-compose.yml
@@ -0,0 +1,43 @@
+version: '3.7'
+services:
+ postgresqldb:
+ container_name: postgresqldb
+ image: pgvector/pgvector:pg16
+ extra_hosts: [ 'host.docker.internal:host-gateway' ]
+ restart: always
+ environment:
+ - POSTGRES_USER=appuser
+ - POSTGRES_PASSWORD=secret
+ - POSTGRES_DB=appdb
+ - PGPASSWORD=secret
+ logging:
+ options:
+ max-size: 10m
+ max-file: "3"
+ ports:
+ - '5432:5432'
+ healthcheck:
+ test: "pg_isready -U appuser -d appdb"
+ interval: 2s
+ timeout: 20s
+ retries: 10
+ pgadmin:
+ container_name: pgadmin_container
+ image: dpage/pgadmin4
+ extra_hosts: [ 'host.docker.internal:host-gateway' ]
+ environment:
+ PGADMIN_DEFAULT_EMAIL: ${PGADMIN_DEFAULT_EMAIL:-pgadmin4@pgadmin.org}
+ PGADMIN_DEFAULT_PASSWORD: ${PGADMIN_DEFAULT_PASSWORD:-admin}
+ PGADMIN_CONFIG_SERVER_MODE: "False"
+ PGADMIN_CONFIG_MASTER_PASSWORD_REQUIRED: "False"
+ ports:
+ - "${PGADMIN_PORT:-5050}:80"
+ depends_on:
+ postgresqldb:
+ condition: service_healthy
+ volumes:
+ - ./docker_pgadmin_servers.json:/pgadmin4/servers.json
+ entrypoint:
+ - "/bin/sh"
+ - "-c"
+ - "/bin/echo 'postgresqldb:5432:*:appuser:secret' > /tmp/pgpassfile && chmod 600 /tmp/pgpassfile && /entrypoint.sh"
diff --git a/rag-springai-openai-llm/docker/docker_pgadmin_servers.json b/rag-springai-openai-llm/docker/docker_pgadmin_servers.json
new file mode 100644
index 0000000..7e97769
--- /dev/null
+++ b/rag-springai-openai-llm/docker/docker_pgadmin_servers.json
@@ -0,0 +1,14 @@
+{
+ "Servers": {
+ "1": {
+ "Name": "Docker Compose DB",
+ "Group": "Servers",
+ "Port": 5432,
+ "Username": "appuser",
+ "Host": "postgresqldb",
+ "SSLMode": "prefer",
+ "MaintenanceDB": "appdb",
+ "PassFile": "/tmp/pgpassfile"
+ }
+ }
+}
\ No newline at end of file