diff --git a/client/.env.example b/client/.env.example
index 91b446e3..a493a459 100644
--- a/client/.env.example
+++ b/client/.env.example
@@ -1,3 +1,2 @@
 SELF_PATH=/client/
-SUPABASE_URL=http://localhost:8000
-SUPABASE_API_KEY=eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyAgCiAgICAicm9sZSI6ICJhbm9uIiwKICAgICJpc3MiOiAic3VwYWJhc2UtZGVtbyIsCiAgICAiaWF0IjogMTY0MTc2OTIwMCwKICAgICJleHAiOiAxNzk5NTM1NjAwCn0.dc_X5iR_VP_qT0zsiyj_I_OZ2T9FtRU2BBNWN8Bu4GE
\ No newline at end of file
+NEXT_PUBLIC_API_URL=http://localhost:3000/api
diff --git a/client/app/agents/page.tsx b/client/app/agents/page.tsx
deleted file mode 100644
index cbed85e1..00000000
--- a/client/app/agents/page.tsx
+++ /dev/null
@@ -1,85 +0,0 @@
-import { ChatWindow } from '@/components/chat/ChatWindow';
-
-export default function AgentsPage() {
-  const InfoCard = (
[... InfoCard JSX lost in extraction; only the heading "🦉 xuexiao" survives ...]
-  );
-  return (
[... <ChatWindow> element and its props lost in extraction ...]
-  );
-}
diff --git a/client/app/api/auth/[auth0]/route.ts b/client/app/api/auth/[auth0]/route.ts
deleted file mode 100644
index 81cf645a..00000000
--- a/client/app/api/auth/[auth0]/route.ts
+++ /dev/null
@@ -1,25 +0,0 @@
-import { supabase } from '@/share/supabas-client';
-import { AppRouteHandlerFnContext, Session, handleAuth, handleCallback } from '@auth0/nextjs-auth0';
-import { NextRequest } from 'next/server';
-
-export const GET = handleAuth({
-  callback: async (req: NextRequest, ctx: AppRouteHandlerFnContext) => {
-    return await handleCallback(req, ctx, {
-      afterCallback: async (_req: NextRequest, session: Session) => {
-        const user = session.user;
-        await supabase.from('profiles').upsert(
-          [{
-            id: user.sub,
-            nickname: user.nickname,
-            name: user.name,
-            picture: user.picture,
-            sid: user.sid,
-            sub: user.sub,
-          }]
-        );
-
-        return session;
-      }
-    });
-  }
-});
diff --git a/client/app/api/bot/config/route.ts b/client/app/api/bot/config/route.ts
deleted file mode 100644
index 69819ed6..00000000
--- a/client/app/api/bot/config/route.ts
+++ /dev/null
@@ -1,31 +0,0 @@
-import { Tables } from '@/types/database.types';
-import { NextResponse } from 'next/server';
-import { supabase } from '@/share/supabas-client';
-import { getSession } from '@auth0/nextjs-auth0/edge';
-
-export const runtime = 'edge';
-
-export const GET = async (request: Request) => {
-  const { searchParams } = new URL(request.url);
-  const id = searchParams.get('id');
-
-  const session = await getSession();
-  const uid = session!.user.sub;
-  if (!id || !uid) {
-    return NextResponse.json({ error: 'Auth failed' }, { status: 401 });
-  }
-  try {
-    const res = await supabase
-      .from('bots')
-      .select('*')
-      .eq('id', id)
-      .eq('uid', uid);
-    if (res?.error) {
-      return NextResponse.json({ error: res?.error?.message }, { status: 400 });
-    }
-    const bots = res?.data ?? ([] as Tables<'bots'>[]);
-    return NextResponse.json({ data: bots }, { status: 200 });
-  } catch (err) {
-    return NextResponse.json({ error: 'Server error' }, { status: 500 });
-  }
-};
diff --git a/client/app/api/bot/create/route.ts b/client/app/api/bot/create/route.ts
deleted file mode 100644
index f5ffe3e6..00000000
--- a/client/app/api/bot/create/route.ts
+++ /dev/null
@@ -1,23 +0,0 @@
-import { supabase } from '@/share/supabas-client';
-import { NextRequest, NextResponse } from 'next/server';
-import { getSession } from '@auth0/nextjs-auth0/edge';
-
-export const runtime = 'edge';
-
-export async function POST(req: NextRequest) {
-  const session = await getSession();
-  try {
-    const body = await req.json();
-    const { data, error } = await supabase
-      .from('bots')
-      .insert([{ ...body, uid: session!.user.sub }])
-      .select();
-    if (error) {
-      return NextResponse.json({ error: error?.message }, { status: 400 });
-    }
-
-    return NextResponse.json({ data: data?.[0] }, { status: 200 });
-  } catch (err) {
-    return NextResponse.json({ error: 'Server error' }, { status: 500 });
-  }
-}
diff --git a/client/app/api/bot/delete/route.ts b/client/app/api/bot/delete/route.ts
deleted file mode 100644
index 7474e2af..00000000
--- a/client/app/api/bot/delete/route.ts
+++ /dev/null
@@ -1,33 +0,0 @@
-import { supabase } from '@/share/supabas-client';
-import { NextRequest, NextResponse } from 'next/server';
-import { getSession } from '@auth0/nextjs-auth0/edge';
-
-export const runtime = 'edge';
-
-export async function DELETE(request: NextRequest) {
-  const { searchParams } = new URL(request.url);
-  const id = searchParams.get('id');
-
-  const session = await getSession();
-  const uid = session!.user.sub;
-  if (!id || !uid) {
-    return NextResponse.json({ error: 'Auth failed' }, { status: 401 });
-  }
-
-  try {
-    const uid = session!.user.sub;
-    const { error } = await supabase
-      .from('bots')
-      .delete()
-      .eq('id', id)
-      .eq('uid', uid);
-
-    if (error) {
-      return NextResponse.json({ error: error?.message }, { status: 400 });
-    }
-
-    return NextResponse.json({ id }, { status: 200 });
-  } catch (err) {
-    return NextResponse.json({ error: 'Server error' }, { status: 500 });
-  }
-}
diff --git a/client/app/api/bot/detail/route.ts b/client/app/api/bot/detail/route.ts
deleted file mode 100644
index 279fd8a3..00000000
--- a/client/app/api/bot/detail/route.ts
+++ /dev/null
@@ -1,33 +0,0 @@
-import { Tables } from '@/types/database.types';
-import { NextResponse } from 'next/server';
-import { supabase } from '@/share/supabas-client';
-
-export const runtime = 'edge';
-
-export const GET = async (request: Request) => {
-  const { searchParams } = new URL(request.url);
-  const id = searchParams.get('id');
-
-  if (!id) {
-    return NextResponse.json(
-      { error: 'Incomplete parameters' },
-      { status: 400 },
-    );
-  }
-
-  try {
-    const res = await supabase
-      .from('bots')
-      .select(
-        'id, created_at, updated_at, avatar, description, enable_img_generation, label, name, starters, voice, public',
-      )
-      .eq('id', id);
-    if (res?.error) {
-      return NextResponse.json({ error: res?.error?.message }, { status: 400 });
-    }
-    const bots = res?.data ?? ([] as Tables<'bots'>[]);
-    return NextResponse.json({ data: bots }, { status: 200 });
-  } catch (err) {
-    return NextResponse.json({ error: 'Server error' }, { status: 500 });
-  }
-};
diff --git a/client/app/api/bot/list/route.ts b/client/app/api/bot/list/route.ts
deleted file mode 100644
index b5997359..00000000
--- a/client/app/api/bot/list/route.ts
+++ /dev/null
@@ -1,41 +0,0 @@
-import { supabase } from '@/share/supabas-client';
-import { Tables } from '@/types/database.types';
-import { NextResponse } from 'next/server';
-import { getSession } from '@auth0/nextjs-auth0/edge';
-
-export const runtime = 'edge';
-
-export const GET = async (request: Request) => {
-  const { searchParams } = new URL(request.url);
-  const personal = searchParams.get('personal');
-
-  try {
-    let res;
-    if (personal !== 'true') {
-      res = await supabase
-        .from('bots')
-        .select(
-          'id, created_at, updated_at, avatar, description, enable_img_generation, label, name, starters, voice, public',
-        )
-        .eq('public', true)
-        .order('created_at', { ascending: false });
-    } else {
-      const session = await getSession();
-      const uid = session!.user.sub;
-      res = await supabase
-        .from('bots')
-        .select(
-          'id, created_at, updated_at, avatar, description, enable_img_generation, label, name, starters, voice, public',
-        )
-        .eq('uid', uid)
-        .order('created_at', { ascending: false });
-    }
-    if (res?.error) {
-      return NextResponse.json({ error: res?.error?.message }, { status: 400 });
-    }
-    const bots = res?.data ?? ([] as Tables<'bots'>[]);
-    return NextResponse.json({ data: bots }, { status: 200 });
-  } catch (err) {
-    return NextResponse.json({ error: 'Server error' }, { status: 500 });
-  }
-};
diff --git a/client/app/api/bot/update/route.ts b/client/app/api/bot/update/route.ts
deleted file mode 100644
index daf2d355..00000000
--- a/client/app/api/bot/update/route.ts
+++ /dev/null
@@ -1,34 +0,0 @@
-import { supabase } from '@/share/supabas-client';
-import { omit } from 'lodash';
-import { NextRequest, NextResponse } from 'next/server';
-import { getSession } from '@auth0/nextjs-auth0/edge';
-
-export const runtime = 'edge';
-
-export async function POST(req: NextRequest) {
-  const session = await getSession();
-  try {
-    const uid = session!.user.sub;
-    const body = await req.json();
-    const id = body?.id;
-    if (!id || !uid) {
-      return NextResponse.json({ error: 'Auth failed' }, { status: 401 });
-    }
-
-    const params = omit(body, 'id');
-    const { data, error } = await supabase
-      .from('bots')
-      .update(params)
-      .eq('id', id)
-      .eq('uid', uid)
-      .select();
-
-    if (error) {
-      return NextResponse.json({ error: error?.message }, { status: 400 });
-    }
-
-    return NextResponse.json({ data }, { status: 200 });
-  } catch (err) {
-    return NextResponse.json({ error: 'Server error' }, { status: 500 });
-  }
-}
diff --git a/client/app/api/chat/agents/route.ts b/client/app/api/chat/agents/route.ts
deleted file mode 100644
index b7d00091..00000000
--- a/client/app/api/chat/agents/route.ts
+++ /dev/null
@@ -1,104 +0,0 @@
-import { NextRequest, NextResponse } from "next/server";
-import { Message as VercelChatMessage, StreamingTextResponse } from "ai";
-
-import { initializeAgentExecutorWithOptions } from "langchain/agents";
-import { ChatOpenAI } from "langchain/chat_models/openai";
-import { SerpAPI } from "langchain/tools";
-import { Calculator } from "langchain/tools/calculator";
-
-import { AIMessage, ChatMessage, HumanMessage } from "langchain/schema";
-import { BufferMemory, ChatMessageHistory } from "langchain/memory";
-
-export const runtime = "edge";
-
-const convertVercelMessageToLangChainMessage = (message: VercelChatMessage) => {
-  if (message.role === "user") {
-    return new HumanMessage(message.content);
-  } else if (message.role === "assistant") {
-    return new AIMessage(message.content);
-  } else {
-    return new ChatMessage(message.content, message.role);
-  }
-};
-
-const PREFIX_TEMPLATE = `You are a talking parrot named Polly. All final responses must be how a talking parrot would respond.`;
-
-/**
- * This handler initializes and calls an OpenAI Functions agent.
- * See the docs for more information:
- *
- * https://js.langchain.com/docs/modules/agents/agent_types/openai_functions_agent
- */
-export async function POST(req: NextRequest) {
-  try {
-    const body = await req.json();
-    /**
-     * We represent intermediate steps as system messages for display purposes,
-     * but don't want them in the chat history.
-     */
-    const messages = (body.messages ?? []).filter(
-      (message: VercelChatMessage) =>
-        message.role === "user" || message.role === "assistant",
-    );
-    const returnIntermediateSteps = body.show_intermediate_steps;
-    const previousMessages = messages
-      .slice(0, -1)
-      .map(convertVercelMessageToLangChainMessage);
-    const currentMessageContent = messages[messages.length - 1].content;
-
-    // Requires process.env.SERPAPI_API_KEY to be set: https://serpapi.com/
-    const tools = [new Calculator(), new SerpAPI()];
-    const chat = new ChatOpenAI({ modelName: "gpt-4", temperature: 0 });
-
-    /**
-     * The default prompt for the OpenAI functions agent has a placeholder
-     * where chat messages get injected - that's why we set "memoryKey" to
-     * "chat_history". This will be made clearer and more customizable in the future.
-     */
-    const executor = await initializeAgentExecutorWithOptions(tools, chat, {
-      agentType: "openai-functions",
-      verbose: true,
-      returnIntermediateSteps,
-      memory: new BufferMemory({
-        memoryKey: "chat_history",
-        chatHistory: new ChatMessageHistory(previousMessages),
-        returnMessages: true,
-        outputKey: "output",
-      }),
-      agentArgs: {
-        prefix: PREFIX_TEMPLATE,
-      },
-    });
-
-    const result = await executor.call({
-      input: currentMessageContent,
-    });
-
-    // Intermediate steps are too complex to stream
-    if (returnIntermediateSteps) {
-      return NextResponse.json(
-        { output: result.output, intermediate_steps: result.intermediateSteps },
-        { status: 200 },
-      );
-    } else {
-      /**
-       * Agent executors don't support streaming responses (yet!), so stream back the
-       * complete response one character at a time with a delay to simulate it.
-       */
-      const textEncoder = new TextEncoder();
-      const fakeStream = new ReadableStream({
-        async start(controller) {
-          for (const character of result.output) {
-            controller.enqueue(textEncoder.encode(character));
-            await new Promise((resolve) => setTimeout(resolve, 20));
-          }
-          controller.close();
-        },
-      });
-
-      return new StreamingTextResponse(fakeStream);
-    }
-  } catch (e: any) {
-    return NextResponse.json({ error: e.message }, { status: 500 });
-  }
-}
diff --git a/client/app/api/chat/retrieval/route.ts b/client/app/api/chat/retrieval/route.ts
deleted file mode 100644
index b0d5d4f1..00000000
--- a/client/app/api/chat/retrieval/route.ts
+++ /dev/null
@@ -1,170 +0,0 @@
-import { NextRequest, NextResponse } from 'next/server';
-import { Message as VercelChatMessage, StreamingTextResponse } from 'ai';
-
-import { createClient } from '@supabase/supabase-js';
-
-import { ChatOpenAI } from 'langchain/chat_models/openai';
-import { PromptTemplate } from 'langchain/prompts';
-import { SupabaseVectorStore } from 'langchain/vectorstores/supabase';
-import { Document } from 'langchain/document';
-import { RunnableSequence } from 'langchain/schema/runnable';
-import {
-  BytesOutputParser,
-  StringOutputParser,
-} from 'langchain/schema/output_parser';
-import { OpenAIEmbeddings } from 'langchain/embeddings/openai';
-
-export const runtime = 'edge';
-
-const combineDocumentsFn = (docs: Document[], separator = '\n\n') => {
-  const serializedDocs = docs.map((doc) => doc.pageContent);
-  return serializedDocs.join(separator);
-};
-
-const formatVercelMessages = (chatHistory: VercelChatMessage[]) => {
-  const formattedDialogueTurns = chatHistory.map((message) => {
-    if (message.role === 'user') {
-      return `Human: ${message.content}`;
-    } else if (message.role === 'assistant') {
-      return `Assistant: ${message.content}`;
-    } else {
-      return `${message.role}: ${message.content}`;
-    }
-  });
-  return formattedDialogueTurns.join('\n');
-};
-
-const CONDENSE_QUESTION_TEMPLATE = `Given the following conversation and a follow up question, rephrase the follow up question to be a standalone question, in its original language.
-
-<chat_history>
-  {chat_history}
-</chat_history>
-
-Follow Up Input: {question}
-Standalone question:`;
-const condenseQuestionPrompt = PromptTemplate.fromTemplate(
-  CONDENSE_QUESTION_TEMPLATE,
-);
-
-const ANSWER_TEMPLATE = `You are an energetic talking puppy named Dana, and must answer all questions like a happy, talking dog would.
-Use lots of puns!
-
-Answer the question based only on the following context and chat history:
-<context>
-  {context}
-</context>
-
-<chat_history>
-  {chat_history}
-</chat_history>
-
-Question: {question}
-`;
-const answerPrompt = PromptTemplate.fromTemplate(ANSWER_TEMPLATE);
-
-/**
- * This handler initializes and calls a retrieval chain. It composes the chain using
- * LangChain Expression Language. See the docs for more information:
- *
- * https://js.langchain.com/docs/guides/expression_language/cookbook#conversational-retrieval-chain
- */
-export async function POST(req: NextRequest) {
-  try {
-    const body = await req.json();
-    const messages = body.messages ?? [];
-    const previousMessages = messages.slice(0, -1);
-    const currentMessageContent = messages[messages.length - 1].content;
-
-    const model = new ChatOpenAI({
-      modelName: 'gpt-3.5-turbo',
-      temperature: 0.2,
-    });
-
-    const client = createClient(
-      process.env.SUPABASE_URL!,
-      process.env.SUPABASE_API_KEY!,
-    );
-    const vectorstore = new SupabaseVectorStore(new OpenAIEmbeddings(), {
-      client,
-      tableName: 'documents',
-      queryName: 'match_documents',
-    });
-
-    /**
-     * We use LangChain Expression Language to compose two chains.
-     * To learn more, see the guide here:
-     *
-     * https://js.langchain.com/docs/guides/expression_language/cookbook
-     */
-    const standaloneQuestionChain = RunnableSequence.from([
-      condenseQuestionPrompt,
-      model,
-      new StringOutputParser(),
-    ]);
-
-    let resolveWithDocuments: (value: Document[]) => void;
-    const documentPromise = new Promise<Document[]>((resolve) => {
-      resolveWithDocuments = resolve;
-    });
-
-    const retriever = vectorstore.asRetriever({
-      callbacks: [
-        {
-          handleRetrieverEnd(documents) {
-            resolveWithDocuments(documents);
-          },
-        },
-      ],
-    });
-
-    const retrievalChain = retriever.pipe(combineDocumentsFn);
-
-    const answerChain = RunnableSequence.from([
-      {
-        context: RunnableSequence.from([
-          (input) => input.question,
-          retrievalChain,
-        ]),
-        chat_history: (input) => input.chat_history,
-        question: (input) => input.question,
-      },
-      answerPrompt,
-      model,
-    ]);
-
-    const conversationalRetrievalQAChain = RunnableSequence.from([
-      {
-        question: standaloneQuestionChain,
-        chat_history: (input) => input.chat_history,
-      },
-      answerChain,
-      new BytesOutputParser(),
-    ]);
-
-    const stream = await conversationalRetrievalQAChain.stream({
-      question: currentMessageContent,
-      chat_history: formatVercelMessages(previousMessages),
-    });
-
-    const documents = await documentPromise;
-    const serializedSources = Buffer.from(
-      JSON.stringify(
-        documents.map((doc) => {
-          return {
-            pageContent: doc.pageContent.slice(0, 50) + '...',
-            metadata: doc.metadata,
-          };
-        }),
-      ),
-    ).toString('base64');
-
-    return new StreamingTextResponse(stream, {
-      headers: {
-        'x-message-index': (previousMessages.length + 1).toString(),
-        'x-sources': serializedSources,
-      },
-    });
-  } catch (e: any) {
-    return NextResponse.json({ error: e.message }, { status: 500 });
-  }
-}
diff --git a/client/app/api/chat/retrieval_agents/route.ts b/client/app/api/chat/retrieval_agents/route.ts
deleted file mode 100644
index 96e7a208..00000000
--- a/client/app/api/chat/retrieval_agents/route.ts
+++ /dev/null
@@ -1,139 +0,0 @@
-import { NextRequest, NextResponse } from 'next/server';
-import { Message as VercelChatMessage, StreamingTextResponse } from 'ai';
-
-import { createClient } from '@supabase/supabase-js';
-
-import { ChatOpenAI } from 'langchain/chat_models/openai';
-import { SupabaseVectorStore } from 'langchain/vectorstores/supabase';
-import { AIMessage, ChatMessage, HumanMessage } from 'langchain/schema';
-import { OpenAIEmbeddings } from 'langchain/embeddings/openai';
-import {
-  createRetrieverTool,
-  OpenAIAgentTokenBufferMemory,
-} from 'langchain/agents/toolkits';
-import { ChatMessageHistory } from 'langchain/memory';
-import { initializeAgentExecutorWithOptions } from 'langchain/agents';
-
-export const runtime = 'edge';
-
-const convertVercelMessageToLangChainMessage = (message: VercelChatMessage) => {
-  if (message.role === 'user') {
-    return new HumanMessage(message.content);
-  } else if (message.role === 'assistant') {
-    return new AIMessage(message.content);
-  } else {
-    return new ChatMessage(message.content, message.role);
-  }
-};
-
-const TEMPLATE = `You are a stereotypical robot named Robbie and must answer all questions like a stereotypical robot. Use lots of interjections like "BEEP" and "BOOP".
-
-If you don't know how to answer a question, use the available tools to look up relevant information. You should particularly do this for questions about LangChain.`;
-
-/**
- * This handler initializes and calls a retrieval agent. It requires an OpenAI
- * Functions model. See the docs for more information:
- *
- * https://js.langchain.com/docs/use_cases/question_answering/conversational_retrieval_agents
- */
-export async function POST(req: NextRequest) {
-  try {
-    const body = await req.json();
-    /**
-     * We represent intermediate steps as system messages for display purposes,
-     * but don't want them in the chat history.
-     */
-    const messages = (body.messages ?? []).filter(
-      (message: VercelChatMessage) =>
-        message.role === 'user' || message.role === 'assistant',
-    );
-    const returnIntermediateSteps = body.show_intermediate_steps;
-    const previousMessages = messages.slice(0, -1);
-    const currentMessageContent = messages[messages.length - 1].content;
-
-    const model = new ChatOpenAI({
-      modelName: 'gpt-4',
-    });
-
-    const client = createClient(
-      process.env.SUPABASE_URL!,
-      process.env.SUPABASE_API_KEY!,
-    );
-    const vectorstore = new SupabaseVectorStore(new OpenAIEmbeddings(), {
-      client,
-      tableName: 'documents',
-      queryName: 'match_documents',
-    });
-
-    const chatHistory = new ChatMessageHistory(
-      previousMessages.map(convertVercelMessageToLangChainMessage),
-    );
-
-    /**
-     * This is a special type of memory specifically for conversational
-     * retrieval agents.
-     * It tracks intermediate steps as well as chat history up to a
-     * certain number of tokens.
-     *
-     * The default OpenAI Functions agent prompt has a placeholder named
-     * "chat_history" where history messages get injected - this is why
-     * we set "memoryKey" to "chat_history". This will be made clearer
-     * in a future release.
-     */
-    const memory = new OpenAIAgentTokenBufferMemory({
-      llm: model,
-      memoryKey: 'chat_history',
-      outputKey: 'output',
-      chatHistory,
-    });
-
-    const retriever = vectorstore.asRetriever();
-
-    /**
-     * Wrap the retriever in a tool to present it to the agent in a
-     * usable form.
-     */
-    const tool = createRetrieverTool(retriever, {
-      name: 'search_latest_knowledge',
-      description: 'Searches and returns up-to-date general information.',
-    });
-
-    const executor = await initializeAgentExecutorWithOptions([tool], model, {
-      agentType: 'openai-functions',
-      memory,
-      returnIntermediateSteps: true,
-      verbose: true,
-      agentArgs: {
-        prefix: TEMPLATE,
-      },
-    });
-
-    const result = await executor.call({
-      input: currentMessageContent,
-    });
-
-    if (returnIntermediateSteps) {
-      return NextResponse.json(
-        { output: result.output, intermediate_steps: result.intermediateSteps },
-        { status: 200 },
-      );
-    } else {
-      // Agent executors don't support streaming responses (yet!), so stream back the complete response one
-      // character at a time to simulate it.
-      const textEncoder = new TextEncoder();
-      const fakeStream = new ReadableStream({
-        async start(controller) {
-          for (const character of result.output) {
-            controller.enqueue(textEncoder.encode(character));
-            await new Promise((resolve) => setTimeout(resolve, 20));
-          }
-          controller.close();
-        },
-      });
-
-      return new StreamingTextResponse(fakeStream);
-    }
-  } catch (e: any) {
-    return NextResponse.json({ error: e.message }, { status: 500 });
-  }
-}
diff --git a/client/app/api/chat/structured_output/route.ts b/client/app/api/chat/structured_output/route.ts
deleted file mode 100644
index 67370445..00000000
--- a/client/app/api/chat/structured_output/route.ts
+++ /dev/null
@@ -1,90 +0,0 @@
-import { NextRequest, NextResponse } from "next/server";
-
-import { z } from "zod";
-import { zodToJsonSchema } from "zod-to-json-schema";
-
-import { ChatOpenAI } from "langchain/chat_models/openai";
-import { PromptTemplate } from "langchain/prompts";
-import { JsonOutputFunctionsParser } from "langchain/output_parsers";
-
-export const runtime = "edge";
-
-const TEMPLATE = `Extract the requested fields from the input.
-
-The field "entity" refers to the first mentioned entity in the input.
-
-Input:
-
-{input}`;
-
-/**
- * This handler initializes and calls an OpenAI Functions powered
- * structured output chain. See the docs for more information:
- *
- * https://js.langchain.com/docs/modules/chains/popular/structured_output
- */
-export async function POST(req: NextRequest) {
-  try {
-    const body = await req.json();
-    const messages = body.messages ?? [];
-    const currentMessageContent = messages[messages.length - 1].content;
-
-    const prompt = PromptTemplate.fromTemplate(TEMPLATE);
-    /**
-     * Function calling is currently only supported with ChatOpenAI models
-     */
-    const model = new ChatOpenAI({
-      temperature: 0.8,
-      modelName: "gpt-4",
-    });
-
-    /**
-     * We use Zod (https://zod.dev) to define our schema for convenience,
-     * but you can pass JSON Schema directly if desired.
-     */
-    const schema = z.object({
-      tone: z
-        .enum(["positive", "negative", "neutral"])
-        .describe("The overall tone of the input"),
-      entity: z.string().describe("The entity mentioned in the input"),
-      word_count: z.number().describe("The number of words in the input"),
-      chat_response: z.string().describe("A response to the human's input"),
-      final_punctuation: z
-        .optional(z.string())
-        .describe("The final punctuation mark in the input, if any."),
-    });
-
-    /**
-     * Bind the function and schema to the OpenAI model.
-     * Future invocations of the returned model will always use these arguments.
-     *
-     * Specifying "function_call" ensures that the provided function will always
-     * be called by the model.
-     */
-    const functionCallingModel = model.bind({
-      functions: [
-        {
-          name: "output_formatter",
-          description: "Should always be used to properly format output",
-          parameters: zodToJsonSchema(schema),
-        },
-      ],
-      function_call: { name: "output_formatter" },
-    });
-
-    /**
-     * Returns a chain with the function calling model.
-     */
-    const chain = prompt
-      .pipe(functionCallingModel)
-      .pipe(new JsonOutputFunctionsParser());
-
-    const result = await chain.invoke({
-      input: currentMessageContent,
-    });
-
-    return NextResponse.json(result, { status: 200 });
-  } catch (e: any) {
-    return NextResponse.json({ error: e.message }, { status: 500 });
-  }
-}
diff --git a/client/app/api/retrieval/ingest/route.ts b/client/app/api/retrieval/ingest/route.ts
deleted file mode 100644
index d60e63e1..00000000
--- a/client/app/api/retrieval/ingest/route.ts
+++ /dev/null
@@ -1,63 +0,0 @@
-import { NextRequest, NextResponse } from 'next/server';
-import { RecursiveCharacterTextSplitter } from 'langchain/text_splitter';
-
-import { createClient } from '@supabase/supabase-js';
-import { SupabaseVectorStore } from 'langchain/vectorstores/supabase';
-import { OpenAIEmbeddings } from 'langchain/embeddings/openai';
-
-export const runtime = 'edge';
-
-// Before running, follow set-up instructions at
-// https://js.langchain.com/docs/modules/indexes/vector_stores/integrations/supabase
-
-/**
- * This handler takes input text, splits it into chunks, and embeds those chunks
- * into a vector store for later retrieval. See the following docs for more information:
- *
- * https://js.langchain.com/docs/modules/data_connection/document_transformers/text_splitters/recursive_text_splitter
- * https://js.langchain.com/docs/modules/data_connection/vectorstores/integrations/supabase
- */
-export async function POST(req: NextRequest) {
-  const body = await req.json();
-  const text = body.text;
-
-  if (process.env.NEXT_PUBLIC_DEMO === 'true') {
-    return NextResponse.json(
-      {
-        error: [
-          'Ingest is not supported in demo mode.',
-          'Please set up your own version of the repo here: https://github.com/langchain-ai/langchain-nextjs-template',
-        ].join('\n'),
-      },
-      { status: 403 },
-    );
-  }
-
-  try {
-    const client = createClient(
-      process.env.SUPABASE_URL!,
-      process.env.SUPABASE_API_KEY!,
-    );
-
-    const splitter = RecursiveCharacterTextSplitter.fromLanguage('markdown', {
-      chunkSize: 256,
-      chunkOverlap: 20,
-    });
-
-    const splitDocuments = await splitter.createDocuments([text]);
-
-    const vectorstore = await SupabaseVectorStore.fromDocuments(
-      splitDocuments,
-      new OpenAIEmbeddings(),
-      {
-        client,
-        tableName: 'documents',
-        queryName: 'match_documents',
-      },
-    );
-
-    return NextResponse.json({ ok: true }, { status: 200 });
-  } catch (e: any) {
-    return NextResponse.json({ error: e.message }, { status: 500 });
-  }
-}
diff --git a/client/app/retrieval/page.tsx b/client/app/retrieval/page.tsx
deleted file mode 100644
index bd1ccaac..00000000
--- a/client/app/retrieval/page.tsx
+++ /dev/null
@@ -1,110 +0,0 @@
-import { ChatWindow } from '@/components/chat/ChatWindow';
-
-export default function AgentsPage() {
-  const InfoCard = (
[... InfoCard JSX lost in extraction; only the heading "🦉 xuexiao" survives ...]
-  );
-  return (
[... <ChatWindow> element and its props lost in extraction ...]
-  );
-}
diff --git a/client/app/retrieval_agents/page.tsx b/client/app/retrieval_agents/page.tsx
deleted file mode 100644
index 8f8798bf..00000000
--- a/client/app/retrieval_agents/page.tsx
+++ /dev/null
@@ -1,99 +0,0 @@
-import { ChatWindow } from '@/components/chat/ChatWindow';
-
-export default function AgentsPage() {
-  const InfoCard = (
[... InfoCard JSX lost in extraction; only the heading "🦉 xuexiao" survives ...]
-  );
-  return (
[... <ChatWindow> element and its props lost in extraction ...]
-  );
-}
diff --git a/client/app/services/BotsController.ts b/client/app/services/BotsController.ts
index 5eaa40db..f1ff16e3 100644
--- a/client/app/services/BotsController.ts
+++ b/client/app/services/BotsController.ts
@@ -5,36 +5,37 @@
 import { BotProfile } from '@/app/interface';
 
 declare type Bot = Tables<'bots'>;
 
+const apiDomain = process.env.NEXT_PUBLIC_API_DOMAIN;
 // Get the public bot profile by id
 export async function getBotDetail(id: string): Promise<Bot[]> {
-  const response = await axios.get(`/api/bot/detail?id=${id}`);
+  const response = await axios.get(`${apiDomain}/api/bot/detail?id=${id}`);
   return response.data.data;
 }
 
 // Get current user's bot profile by id
 export async function getBotConfig(id: string): Promise<Bot[]> {
-  const response = await axios.get(`/api/bot/config?id=${id}`);
+  const response = await axios.get(`${apiDomain}/api/bot/config?id=${id}`);
   return response.data.data;
 }
 
 // Get the bot list
 export async function getBotList(personal: boolean): Promise<Bot[]> {
-  const response = await axios.get(`/api/bot/list?personal=${personal}`);
+  const response = await axios.get(`${apiDomain}/api/bot/list?personal=${personal}`);
   return response.data.data;
 }
 
 // Delete Bot
 export async function deleteBot(id: string) {
-  return axios.delete(`/api/bot/delete?id=${id}`);
+  return axios.delete(`${apiDomain}/api/bot/delete?id=${id}`);
 }
 
 // Create Bot
 export async function createBot(profile: BotProfile) {
   const params = omit(profile, 'id');
-  return axios.post('/api/bot/create', params);
+  return axios.post(`${apiDomain}/api/bot/create`, params);
 }
 
 // Update Bot
 export async function updateBot(profile: BotProfile) {
-  return axios.post('/api/bot/update', profile);
+  return axios.post(`${apiDomain}/api/bot/update`, profile);
 }
diff --git a/client/app/structured_output/page.tsx b/client/app/structured_output/page.tsx
deleted file mode 100644
index 176f1944..00000000
--- a/client/app/structured_output/page.tsx
+++ /dev/null
@@ -1,99 +0,0 @@
-import { ChatWindow } from '@/components/chat/ChatWindow';
-
-export default function AgentsPage() {
-  const InfoCard = (
[... InfoCard JSX markup lost in extraction; the heading and the list text survive: ...]
-      🦉 xuexiao
-      🧱 This template showcases how to output structured responses with a LangChain.js chain and the Vercel AI SDK in a Next.js project.
-      ☎️ The chain formats the input schema and passes it into an OpenAI Functions model, then parses the output.
-      💻 You can find the prompt, model, and schema logic for this use-case in app/api/chat/structured_output/route.ts.
-      📊 By default, the chain returns an object with tone, word_count, entity, chat_response, and an optional final_punctuation, but you can change it to whatever you'd like!
-      💎 It uses a lightweight, convenient, and powerful schema validation library called Zod to define schemas, but you can initialize the chain with JSON schema too.
-      🎨 The main frontend logic is found in app/structured_output/page.tsx.
-      🐙 This template is open source - you can see the source code and deploy your own version from the GitHub repo!
-      👇 Try typing e.g. What a beautiful day! below!
-  );
-  return (
[... <ChatWindow> element and its props lost in extraction ...]
-  );
-}
diff --git a/client/middleware.ts b/client/middleware.ts
deleted file mode 100644
index 5b2dd272..00000000
--- a/client/middleware.ts
+++ /dev/null
@@ -1,21 +0,0 @@
-import { NextResponse } from 'next/server'
-import type { NextRequest } from 'next/server'
-import { getSession } from '@auth0/nextjs-auth0/edge'
-
-export async function middleware(req: NextRequest) {
-  const res = NextResponse.next()
-
-  const session = await getSession();
-
-  if (!session?.user) {
-    return NextResponse.redirect(new URL('/api/auth/login', req.url));
-  }
-
-  return res;
-}
-
-export const config = {
-  matcher: [
-    '/factory/:path*',
-  ],
-}
diff --git a/package.json b/package.json
index b92311a6..eb62f9b2 100644
--- a/package.json
+++ b/package.json
@@ -4,9 +4,9 @@
   "private": true,
   "scripts": {
     "bootstrap": "cd client && yarn && cd ../server && bash setup_python.sh",
-    "client-dev": "cd client && yarn run dev",
-    "server-dev": "cd server && ./venv/bin/python3 -m uvicorn main:app --reload",
-    "dev:python": "concurrently \"yarn run client-dev\" \"yarn run server-dev\"",
+    "dev:client": "cd client && yarn run dev",
+    "dev:server": "cd server && ./venv/bin/python3 -m uvicorn main:app --reload",
+    "dev:app": "concurrently \"yarn run dev:client\" \"yarn run dev:server\"",
     "build:docker": "docker build -t bot-meta ."
   },
   "engines": {