polish code and add docker compose #30

Merged
merged 1 commit on Mar 29, 2024
5 changes: 5 additions & 0 deletions chatmodel-springai/pom.xml
@@ -35,6 +35,11 @@
<groupId>org.apache.httpcomponents.client5</groupId>
<artifactId>httpclient5</artifactId>
</dependency>
<dependency>
<groupId>org.springdoc</groupId>
<artifactId>springdoc-openapi-starter-webmvc-ui</artifactId>
<version>2.4.0</version>
</dependency>

<dependency>
<groupId>org.springframework.boot</groupId>
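The new springdoc-openapi-starter-webmvc-ui dependency generates an OpenAPI description for the module's REST endpoints and serves Swagger UI (by default at /swagger-ui.html). No extra configuration is required; as a purely illustrative sketch, the generated docs could be titled via an OpenAPI bean like the hypothetical one below, which is not part of this PR:

import io.swagger.v3.oas.models.OpenAPI;
import io.swagger.v3.oas.models.info.Info;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

@Configuration
class OpenApiConfig {

    // Hypothetical customization; springdoc picks this bean up automatically.
    @Bean
    OpenAPI chatModelOpenApi() {
        return new OpenAPI().info(new Info().title("chatmodel-springai").version("v1"));
    }
}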
LoggingConfig.java
@@ -21,7 +21,7 @@
@ConditionalOnProperty(value = "spring.ai.openai.api-key", havingValue = "demo")
public class LoggingConfig {

private final Logger log = LoggerFactory.getLogger(LoggingConfig.class);
private static final Logger LOGGER = LoggerFactory.getLogger(LoggingConfig.class);

@Bean
RestClient.Builder restClientBuilder() {
@@ -40,21 +40,21 @@ RestClient.Builder restClientBuilder() {
}

private void logResponse(ClientHttpResponse response) throws IOException {
log.info("============================response begin==========================================");
log.info("Status code : {}", response.getStatusCode());
log.info("Status text : {}", response.getStatusText());
log.info("Headers : {}", response.getHeaders());
log.info("Response body: {}", StreamUtils.copyToString(response.getBody(), Charset.defaultCharset()));
log.info("=======================response end=================================================");
LOGGER.info("============================response begin==========================================");
LOGGER.info("Status code : {}", response.getStatusCode());
LOGGER.info("Status text : {}", response.getStatusText());
LOGGER.info("Headers : {}", response.getHeaders());
LOGGER.info("Response body: {}", StreamUtils.copyToString(response.getBody(), Charset.defaultCharset()));
LOGGER.info("=======================response end=================================================");
}

private void logRequest(HttpRequest request, byte[] body) {

log.info("===========================request begin================================================");
log.info("URI : {}", request.getURI());
log.info("Method : {}", request.getMethod());
log.info("Headers : {}", request.getHeaders());
log.info("Request body: {}", new String(body, StandardCharsets.UTF_8));
log.info("==========================request end================================================");
LOGGER.info("===========================request begin================================================");
LOGGER.info("URI : {}", request.getURI());
LOGGER.info("Method : {}", request.getMethod());
LOGGER.info("Headers : {}", request.getHeaders());
LOGGER.info("Request body: {}", new String(body, StandardCharsets.UTF_8));
LOGGER.info("==========================request end================================================");
}
}
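The logRequest/logResponse helpers above only take effect if the RestClient.Builder bean registers them as an interceptor. The bean body is collapsed in this diff, but it presumably does something along these lines (a sketch only; the BufferingClientHttpRequestFactory is an assumption so the response body can be read for logging and still be consumed by the caller):

// Sketch of the collapsed restClientBuilder() body, not the PR's exact code.
@Bean
RestClient.Builder restClientBuilder() {
    return RestClient.builder()
            // Buffer request/response bodies so they can be logged and re-read (assumption).
            .requestFactory(new BufferingClientHttpRequestFactory(new SimpleClientHttpRequestFactory()))
            .requestInterceptor((request, body, execution) -> {
                logRequest(request, body);
                ClientHttpResponse response = execution.execute(request, body);
                logResponse(response);
                return response;
            });
}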
@@ -3,7 +3,7 @@ spring.threads.virtual.enabled=true
spring.ai.openai.api-key=demo
spring.ai.openai.base-url=http://langchain4j.dev/demo/openai
spring.ai.openai.chat.options.model=gpt-3.5-turbo
spring.ai.openai.chat.options.temperature=0.7
spring.ai.openai.chat.options.temperature=0.2
spring.ai.openai.chat.options.responseFormat=json_object

spring.ai.openai.embedding.enabled=false
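Dropping spring.ai.openai.chat.options.temperature from 0.7 to 0.2 makes the answers more deterministic, which fits the json_object response format configured above. These properties only set defaults; below is a rough, version-sensitive sketch of overriding them per request with Spring AI's OpenAiChatOptions (builder and client method names differ across Spring AI releases, so treat the exact calls as assumptions):

import org.springframework.ai.chat.ChatClient;
import org.springframework.ai.chat.ChatResponse;
import org.springframework.ai.chat.prompt.Prompt;
import org.springframework.ai.openai.OpenAiChatOptions;

// Hypothetical per-request override of the defaults from this properties file.
String ask(ChatClient chatClient, String question) {
    OpenAiChatOptions options = OpenAiChatOptions.builder()
            .withModel("gpt-3.5-turbo")
            .withTemperature(0.2f)
            .build();
    ChatResponse response = chatClient.call(new Prompt(question, options));
    return response.getResult().getOutput().getContent();
}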
@@ -2,6 +2,7 @@

import static dev.langchain4j.data.document.loader.FileSystemDocumentLoader.loadDocument;

import com.learning.ai.service.AICustomerSupportAgent;
import dev.langchain4j.data.document.Document;
import dev.langchain4j.data.document.DocumentSplitter;
import dev.langchain4j.data.document.parser.apache.pdfbox.ApachePdfBoxDocumentParser;
@@ -59,9 +60,17 @@ EmbeddingModel embeddingModel() {
return new AllMiniLmL6V2EmbeddingModel();
}

@Bean
OpenAiTokenizer openAiTokenizer() {
return new OpenAiTokenizer(OpenAiChatModelName.GPT_3_5_TURBO.toString());
}

@Bean
EmbeddingStore<TextSegment> embeddingStore(
EmbeddingModel embeddingModel, ResourceLoader resourceLoader, JdbcConnectionDetails jdbcConnectionDetails)
EmbeddingModel embeddingModel,
ResourceLoader resourceLoader,
JdbcConnectionDetails jdbcConnectionDetails,
OpenAiTokenizer openAiTokenizer)
throws IOException {

// Normally, you would already have your embedding store filled with your data.
@@ -81,6 +90,7 @@ EmbeddingStore<TextSegment> embeddingStore(
.password(jdbcConnectionDetails.getPassword())
.database(path.substring(1))
.table("ai_vector_store")
.dropTableFirst(true)
.dimension(384)
.build();

@@ -97,8 +107,7 @@ EmbeddingStore<TextSegment> embeddingStore(
// 4. Convert segments into embeddings
// 5. Store embeddings into embedding store
// All this can be done manually, but we will use EmbeddingStoreIngestor to automate this:
DocumentSplitter documentSplitter =
DocumentSplitters.recursive(500, 0, new OpenAiTokenizer(OpenAiChatModelName.GPT_3_5_TURBO.toString()));
DocumentSplitter documentSplitter = DocumentSplitters.recursive(500, 0, openAiTokenizer);
EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder()
.documentSplitter(documentSplitter)
.embeddingModel(embeddingModel)
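Two functional changes sit in this file: the OpenAiTokenizer becomes a shared bean that the document splitter now receives by injection, and dropTableFirst(true) makes the PgVector store drop and recreate the ai_vector_store table on startup, so re-ingesting the PDF does not accumulate duplicate segments. The collapsed lines presumably complete langchain4j's usual load → split → embed → store pipeline, roughly like this (the file path is a placeholder; variable names are taken from the visible beans):

// Rough sketch of the ingestion flow implied by the collapsed lines.
Document document = loadDocument(Path.of("src/main/resources/data.pdf"), new ApachePdfBoxDocumentParser());

DocumentSplitter documentSplitter = DocumentSplitters.recursive(500, 0, openAiTokenizer);

EmbeddingStoreIngestor ingestor = EmbeddingStoreIngestor.builder()
        .documentSplitter(documentSplitter)
        .embeddingModel(embeddingModel)   // AllMiniLmL6V2EmbeddingModel, 384-dimensional
        .embeddingStore(embeddingStore)   // the PgVectorEmbeddingStore built above
        .build();
ingestor.ingest(document);                // split, embed and persist the segments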
ChatTools.java
@@ -12,7 +12,7 @@ public class ChatTools {
/**
* This tool is available to {@link AICustomerSupportAgent}
*/
@Tool("chatAssistantTools")
@Tool(name = "currentTime", value = "the current time is")
String currentTime() {
log.info("Inside ChatTools");
return LocalTime.now().toString();
AICustomerSupportAgent.java
@@ -1,14 +1,16 @@
package com.learning.ai.config;
package com.learning.ai.service;

import com.learning.ai.domain.response.AICustomerSupportResponse;
import dev.langchain4j.service.UserMessage;
import dev.langchain4j.service.V;
import dev.langchain4j.service.spring.AiService;

@AiService(tools = "currentTime")
public interface AICustomerSupportAgent {

@UserMessage({
"""
Tell me about {{question}}? as of {{current_date}}
Tell me about {{question}}? as of {{current_date_time}}

Use the following information to answer the question:
{{information}}
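Two changes here: the interface moves from com.learning.ai.config to com.learning.ai.service and is exposed as a declarative @AiService that references the renamed currentTime tool, and the prompt template switches from langchain4j's built-in {{current_date}} variable to {{current_date_time}}, so answers are anchored to the full timestamp rather than the date alone. The collapsed method signature presumably binds the remaining template variables with @V, along the lines of this hypothetical sketch (method and parameter names are assumptions):

// Hypothetical continuation of the interface; {{question}} and {{information}} are bound via @V.
AICustomerSupportResponse chat(@V("question") String question, @V("information") String information);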
@@ -1,6 +1,5 @@
package com.learning.ai.service;

import com.learning.ai.config.AICustomerSupportAgent;
import com.learning.ai.domain.response.AICustomerSupportResponse;
import dev.langchain4j.data.embedding.Embedding;
import dev.langchain4j.data.segment.TextSegment;
10 changes: 10 additions & 0 deletions rag-springai-ollama-llm/docker/docker-compose.yml
@@ -0,0 +1,10 @@
version: '3.7'
services:
ollama:
image: langchain4j/ollama-orca-mini:latest
ports:
- '11434:11434'
redis-stack:
image: redis/redis-stack-server
ports:
- '6379:6379'
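For context, this compose file provides the two local services the Ollama-based RAG module talks to: langchain4j/ollama-orca-mini is an Ollama image with the orca-mini model already baked in, and redis-stack-server supplies the Redis instance (including the search module) that presumably backs the module's vector store. Both expose their default ports, so the application can reach them on localhost:11434 and localhost:6379.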
43 changes: 43 additions & 0 deletions rag-springai-openai-llm/docker/docker-compose.yml
@@ -0,0 +1,43 @@
version: '3.7'
services:
postgresqldb:
container_name: postgresqldb
image: pgvector/pgvector:pg16
extra_hosts: [ 'host.docker.internal:host-gateway' ]
restart: always
environment:
- POSTGRES_USER=appuser
- POSTGRES_PASSWORD=secret
- POSTGRES_DB=appdb
- PGPASSWORD=secret
logging:
options:
max-size: 10m
max-file: "3"
ports:
- '5432:5432'
healthcheck:
test: "pg_isready -U appuser -d appdb"
interval: 2s
timeout: 20s
retries: 10
pgadmin:
container_name: pgadmin_container
image: dpage/pgadmin4
extra_hosts: [ 'host.docker.internal:host-gateway' ]
environment:
PGADMIN_DEFAULT_EMAIL: ${PGADMIN_DEFAULT_EMAIL:[email protected]}
PGADMIN_DEFAULT_PASSWORD: ${PGADMIN_DEFAULT_PASSWORD:-admin}
PGADMIN_CONFIG_SERVER_MODE: "False"
PGADMIN_CONFIG_MASTER_PASSWORD_REQUIRED: "False"
ports:
- "${PGADMIN_PORT:-5050}:80"
depends_on:
postgresqldb:
condition: service_healthy
volumes:
- ./docker_pgadmin_servers.json:/pgadmin4/servers.json
entrypoint:
- "/bin/sh"
- "-c"
- "/bin/echo 'postgresqldb:5432:*:appuser:secret' > /tmp/pgpassfile && chmod 600 /tmp/pgpassfile && /entrypoint.sh"
14 changes: 14 additions & 0 deletions rag-springai-openai-llm/docker/docker_pgadmin_servers.json
@@ -0,0 +1,14 @@
{
"Servers": {
"1": {
"Name": "Docker Compose DB",
"Group": "Servers",
"Port": 5432,
"Username": "appuser",
"Host": "postgresqldb",
"SSLMode": "prefer",
"MaintenanceDB": "appdb",
"PassFile": "/tmp/pgpassfile"
}
}
}
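Taken together, the pgAdmin pieces are set up for zero-prompt access to the vector store database: the compose entrypoint writes a pgpass entry for appuser to /tmp/pgpassfile (and chmods it to 600, which libpq requires) before handing off to pgAdmin's normal /entrypoint.sh, while servers.json pre-registers the postgresqldb container and points PassFile at that same file. With SERVER_MODE and MASTER_PASSWORD_REQUIRED both set to False, opening http://localhost:5050 (the default PGADMIN_PORT above) lands directly in appdb without a pgAdmin login or a database password prompt.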