diff --git a/.cspell.json b/.cspell.json index bfda9c8..65d0f95 100644 --- a/.cspell.json +++ b/.cspell.json @@ -31,7 +31,10 @@ "nemo", "Reranking", "mistralai", - "OPENROUTER_API_KEY" + "Typeguard", + "typeguards", + "OPENROUTER_API_KEY", + "Openrouter" ], "dictionaries": ["typescript", "node", "software-terms"], "import": ["@cspell/dict-typescript/cspell-ext.json", "@cspell/dict-node/cspell-ext.json", "@cspell/dict-software-terms"], diff --git a/.github/workflows/compute.yml b/.github/workflows/compute.yml index 895baec..810c502 100644 --- a/.github/workflows/compute.yml +++ b/.github/workflows/compute.yml @@ -46,9 +46,9 @@ jobs: run: yarn tsx ./src/main.ts id: command-ask env: - SUPABASE_URL: ${{ secrets.SUPABASE_URL }} - SUPABASE_KEY: ${{ secrets.SUPABASE_KEY }} - VOYAGEAI_API_KEY: ${{ secrets.VOYAGEAI_API_KEY }} - OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} - OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }} - UBIQUITY_OS_APP_NAME: ${{ secrets.UBIQUITY_OS_APP_NAME }} \ No newline at end of file + SUPABASE_URL: ${{ secrets.SUPABASE_URL }} + SUPABASE_KEY: ${{ secrets.SUPABASE_KEY }} + VOYAGEAI_API_KEY: ${{ secrets.VOYAGEAI_API_KEY }} + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + OPENROUTER_API_KEY: ${{ secrets.OPENROUTER_API_KEY }} + UBIQUITY_OS_APP_NAME: ${{ secrets.UBIQUITY_OS_APP_NAME }} diff --git a/.github/workflows/update-configuration.yml b/.github/workflows/update-configuration.yml index 2d366d6..b92a487 100644 --- a/.github/workflows/update-configuration.yml +++ b/.github/workflows/update-configuration.yml @@ -18,4 +18,4 @@ jobs: commitMessage: "chore: updated manifest.json and dist build" nodeVersion: "20.10.0" env: - GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore index 12274bf..e23b105 100644 --- a/.gitignore +++ b/.gitignore @@ -15,4 +15,4 @@ junit.xml cypress/screenshots script.ts .wrangler -test-dashboard.md +test-dashboard.md \ No 
newline at end of file diff --git a/package.json b/package.json index 94d1357..71bee04 100644 --- a/package.json +++ b/package.json @@ -17,7 +17,7 @@ "knip-ci": "knip --no-exit-code --reporter json --config .github/knip.ts", "prepare": "husky install", "test": "jest --setupFiles dotenv/config --coverage", - "worker": "wrangler dev --env dev --port 5000" + "worker": "wrangler dev --env dev --port 4000" }, "keywords": [ "typescript", @@ -66,7 +66,7 @@ "prettier": "3.3.2", "ts-jest": "29.1.5", "tsx": "4.15.6", - "typescript": "5.4.5", + "typescript": "^5.6.3", "typescript-eslint": "7.13.1", "wrangler": "^3.81.0" }, diff --git a/src/adapters/openai/helpers/completions.ts b/src/adapters/openai/helpers/completions.ts index f68f305..1d282b5 100644 --- a/src/adapters/openai/helpers/completions.ts +++ b/src/adapters/openai/helpers/completions.ts @@ -1,10 +1,12 @@ import OpenAI from "openai"; import { Context } from "../../../types"; import { SuperOpenAi } from "./openai"; +import { CompletionsModelHelper, ModelApplications } from "../../../types/llm"; const MAX_TOKENS = 7000; export interface CompletionsType { answer: string; + groundTruths: string[]; tokenUsage: { input: number; output: number; @@ -72,8 +74,47 @@ export class Completions extends SuperOpenAi { }); const answer = res.choices[0].message; if (answer && answer.content && res.usage) { - return { answer: answer.content, tokenUsage: { input: res.usage.prompt_tokens, output: res.usage.completion_tokens, total: res.usage.total_tokens } }; + return { + answer: answer.content, + groundTruths, + tokenUsage: { input: res.usage.prompt_tokens, output: res.usage.completion_tokens, total: res.usage.total_tokens }, + }; } - return { answer: "", tokenUsage: { input: 0, output: 0, total: 0 } }; + return { answer: "", tokenUsage: { input: 0, output: 0, total: 0 }, groundTruths }; + } + + async createGroundTruthCompletion( + context: Context, + groundTruthSource: string, + systemMsg: string, + model: CompletionsModelHelper + ): 
Promise { + const { + env: { OPENAI_API_KEY }, + config: { openAiBaseUrl }, + } = context; + + const openAi = new OpenAI({ + apiKey: OPENAI_API_KEY, + ...(openAiBaseUrl && { baseURL: openAiBaseUrl }), + }); + + const msgs = [ + { + role: "system", + content: systemMsg, + }, + { + role: "user", + content: groundTruthSource, + }, + ] as OpenAI.Chat.Completions.ChatCompletionMessageParam[]; + + const res = await openAi.chat.completions.create({ + messages: msgs, + model: model, + }); + + return res.choices[0].message.content; } } diff --git a/src/handlers/ask-llm.ts b/src/handlers/ask-llm.ts index 30112c3..7f3a7b3 100644 --- a/src/handlers/ask-llm.ts +++ b/src/handlers/ask-llm.ts @@ -5,6 +5,8 @@ import { IssueSimilaritySearchResult } from "../adapters/supabase/helpers/issues import { recursivelyFetchLinkedIssues } from "../helpers/issue-fetching"; import { formatChatHistory } from "../helpers/format-chat-history"; import { optimizeContext } from "../helpers/issue"; +import { fetchRepoDependencies, fetchRepoLanguageStats } from "./ground-truths/chat-bot"; +import { findGroundTruths } from "./ground-truths/find-ground-truths"; /** * Asks a question to GPT and returns the response @@ -62,12 +64,14 @@ export async function askGpt(context: Context, question: string, formattedChat: // const reRankedChat = formattedChat.length > 0 ? await context.adapters.voyage.reranker.reRankResults(formattedChat.filter(text => text !== ""), question, 300) : []; similarText = similarText.filter((text) => text !== ""); const rerankedText = similarText.length > 0 ? 
await context.adapters.voyage.reranker.reRankResults(similarText, question) : []; - return context.adapters.openai.completions.createCompletion( - question, - model, - rerankedText, - formattedChat, - ["typescript", "github", "cloudflare worker", "actions", "jest", "supabase", "openai"], - UBIQUITY_OS_APP_NAME - ); + + const languages = await fetchRepoLanguageStats(context); + const { dependencies, devDependencies } = await fetchRepoDependencies(context); + const groundTruths = await findGroundTruths(context, "chat-bot", { + languages, + dependencies, + devDependencies, + }); + + return context.adapters.openai.completions.createCompletion(question, model, rerankedText, formattedChat, groundTruths, UBIQUITY_OS_APP_NAME); } diff --git a/src/handlers/ground-truths/chat-bot.ts b/src/handlers/ground-truths/chat-bot.ts new file mode 100644 index 0000000..30072ef --- /dev/null +++ b/src/handlers/ground-truths/chat-bot.ts @@ -0,0 +1,64 @@ +import { Context } from "../../types"; +import { logger } from "../../helpers/errors"; + +export async function fetchRepoDependencies(context: Context) { + const { + octokit, + payload: { + repository: { + owner: { login: owner }, + name: repo, + }, + }, + } = context; + + const { data: packageJson } = await octokit.repos.getContent({ + owner, + repo, + path: "package.json", + }); + + if ("content" in packageJson) { + return extractDependencies(JSON.parse(Buffer.from(packageJson.content, "base64").toString())); + } else { + throw logger.error(`No package.json found in ${owner}/${repo}`); + } +} + +export function extractDependencies(packageJson: Record>) { + const { dependencies, devDependencies } = packageJson; + + return { + dependencies, + devDependencies, + }; +} + +export async function fetchRepoLanguageStats(context: Context) { + const { + octokit, + payload: { + repository: { + owner: { login: owner }, + name: repo, + }, + }, + } = context; + + const { data: languages } = await octokit.repos.listLanguages({ + owner, + repo, + }); 
+ + const totalBytes = Object.values(languages).reduce((acc, bytes) => acc + bytes, 0); + + const stats = Object.entries(languages).reduce( + (acc, [language, bytes]) => { + acc[language] = bytes / totalBytes; + return acc; + }, + {} as Record<string, number> + ); + + return Array.from(Object.entries(stats)).sort((a, b) => b[1] - a[1]); +} diff --git a/src/handlers/ground-truths/create-system-message.ts b/src/handlers/ground-truths/create-system-message.ts new file mode 100644 index 0000000..2c29c78 --- /dev/null +++ b/src/handlers/ground-truths/create-system-message.ts @@ -0,0 +1,17 @@ +export function createGroundTruthSysMsg({ truthRules, example, conditions }: { truthRules: string[]; example: string[]; conditions?: string[] }) { + return ` +Using the input provided, your goal is to produce an array of strings that represent "Ground Truths." +These ground truths are high-level abstractions that encapsulate the tech stack and dependencies of the repository. + +Each ground truth should: +- ${truthRules.join("\n- ")} + +Example: +${example.join("\n")} + +${conditions ? `Conditions:\n${conditions.join("\n")}` : ""} + +Generate similar ground truths adhering to a maximum of 10. 
+ +Return a JSON parsable array of strings representing the ground truths, without comment or directive.`; +} diff --git a/src/handlers/ground-truths/find-ground-truths.ts b/src/handlers/ground-truths/find-ground-truths.ts new file mode 100644 index 0000000..d441710 --- /dev/null +++ b/src/handlers/ground-truths/find-ground-truths.ts @@ -0,0 +1,57 @@ +import { Context } from "../../types"; +import { AppParamsHelper, GroundTruthsSystemMessage, ModelApplications } from "../../types/llm"; +import { GROUND_TRUTHS_SYSTEM_MESSAGES } from "./prompts"; +import { chatBotPayloadTypeguard, codeReviewPayloadTypeguard } from "../../types/typeguards"; +import { validateGroundTruths } from "./validate"; +import { logger } from "../../helpers/errors"; +import { createGroundTruthSysMsg } from "./create-system-message"; + +export async function findGroundTruths( + context: Context, + application: TApp, + params: AppParamsHelper +): Promise { + const systemMsgObj = GROUND_TRUTHS_SYSTEM_MESSAGES[application]; + + // params are deconstructed to show quickly what's being passed to the function + + if (chatBotPayloadTypeguard(params)) { + const { dependencies, devDependencies, languages } = params; + return findChatBotTruths(context, { dependencies, devDependencies, languages }, systemMsgObj); + } else if (codeReviewPayloadTypeguard(params)) { + const { taskSpecification } = params; + return findCodeReviewTruths(context, { taskSpecification }, systemMsgObj); + } else { + throw logger.error("Invalid payload type for ground truths"); + } +} + +async function findChatBotTruths( + context: Context, + params: AppParamsHelper<"chat-bot">, + systemMsgObj: GroundTruthsSystemMessage<"chat-bot"> +): Promise { + const { + adapters: { + openai: { completions }, + }, + } = context; + const systemMsg = createGroundTruthSysMsg(systemMsgObj); + const truths = await completions.createGroundTruthCompletion<"chat-bot">(context, JSON.stringify(params), systemMsg, "o1-mini"); + return 
validateGroundTruths(truths); +} + +async function findCodeReviewTruths( + context: Context, + params: AppParamsHelper<"code-review">, + systemMsgObj: GroundTruthsSystemMessage<"code-review"> +): Promise { + const { + adapters: { + openai: { completions }, + }, + } = context; + const systemMsg = createGroundTruthSysMsg(systemMsgObj); + const truths = await completions.createGroundTruthCompletion<"code-review">(context, params.taskSpecification, systemMsg, "gpt-4o"); + return validateGroundTruths(truths); +} diff --git a/src/handlers/ground-truths/prompts.ts b/src/handlers/ground-truths/prompts.ts new file mode 100644 index 0000000..d0258e1 --- /dev/null +++ b/src/handlers/ground-truths/prompts.ts @@ -0,0 +1,57 @@ +import { GroundTruthsSystemMessageTemplate, ModelApplications } from "../../types/llm"; + +const CODE_REVIEW_GROUND_TRUTHS_SYSTEM_MESSAGE = { + example: [ + `Using the input provided, your goal is to produce an array of strings that represent "Ground Truths." + These ground truths are high-level abstractions that encapsulate the key aspects of the task. + They serve to guide and inform our code review model's interpretation of the task by providing clear, concise, and explicit insights. + + Each ground truth should: + - Be succinct and easy to understand. + - Directly pertain to the task at hand. + - Focus on essential requirements, behaviors, or assumptions involved in the task. + + Example: + Task: Implement a function that adds two numbers. + Ground Truths: + - The function should accept two numerical inputs. + - The function should return the sum of the two inputs. + - Inputs must be validated to ensure they are numbers. + + Based on the given task, generate similar ground truths adhering to a maximum of 10. 
+ + Return a JSON parsable array of strings representing the ground truths, without comment or directive.`, + ], + truthRules: [], + conditions: [], +}; + +const CHAT_BOT_GROUND_TRUTHS_SYSTEM_MESSAGE = { + truthRules: [ + "Be succinct and easy to understand.", + "Use only the information provided in the input.", + "Focus on essential requirements, behaviors, or assumptions involved in the repository.", + ], + example: [ + "Languages: { TypeScript: 60%, JavaScript: 15%, HTML: 10%, CSS: 5%, ... }", + "Dependencies: Esbuild, Wrangler, React, Tailwind CSS, ms, React-carousel, React-icons, ...", + "Dev Dependencies: @types/node, @types/jest, @mswjs, @testing-library/react, @testing-library/jest-dom, @Cypress ...", + "Ground Truths:", + "- The repo predominantly uses TypeScript, with JavaScript, HTML, and CSS also present.", + "- The repo is a React project that uses Tailwind CSS.", + "- The project is built with Esbuild and deployed with Wrangler, indicating a Cloudflare Workers project.", + "- The repo tests use Jest, Cypress, mswjs, and React Testing Library.", + ], + conditions: [ + "Assume your output builds the foundation for a chatbot to understand the repository when asked an arbitrary query.", + "Do not list every language or dependency, focus on the most prevalent ones.", + "Focus on what is essential to understand the repository at a high level.", + "Brevity is key. Use zero formatting. 
Do not wrap in quotes, backticks, or other characters.", + `response === ["some", "array", "of", "strings"]`, + ], +}; + +export const GROUND_TRUTHS_SYSTEM_MESSAGES: Record<ModelApplications, GroundTruthsSystemMessageTemplate> = { + "code-review": CODE_REVIEW_GROUND_TRUTHS_SYSTEM_MESSAGE, + "chat-bot": CHAT_BOT_GROUND_TRUTHS_SYSTEM_MESSAGE, +} as const; diff --git a/src/handlers/ground-truths/validate.ts b/src/handlers/ground-truths/validate.ts new file mode 100644 index 0000000..2068b95 --- /dev/null +++ b/src/handlers/ground-truths/validate.ts @@ -0,0 +1,29 @@ +import { logger } from "../../helpers/errors"; + +export function validateGroundTruths(truthsString: string | null): string[] { + let truths; + if (!truthsString) { + throw logger.error("Failed to generate ground truths"); + } + + try { + truths = JSON.parse(truthsString); + } catch (err) { + throw logger.error("Failed to parse ground truths"); + } + if (!Array.isArray(truths)) { + throw logger.error("Ground truths must be an array"); + } + + if (truths.length > 10) { + throw logger.error("Ground truths must not exceed 10"); + } + + truths.forEach((truth: string) => { + if (typeof truth !== "string") { + throw logger.error("Each ground truth must be a string"); + } + }); + + return truths; +} diff --git a/src/helpers/errors.ts b/src/helpers/errors.ts new file mode 100644 index 0000000..354a399 --- /dev/null +++ b/src/helpers/errors.ts @@ -0,0 +1,3 @@ +import { Logs } from "@ubiquity-dao/ubiquibot-logger"; // import is fixed in #13 + +export const logger = new Logs("debug"); diff --git a/src/plugin.ts b/src/plugin.ts index b1ff223..a685297 100644 --- a/src/plugin.ts +++ b/src/plugin.ts @@ -57,13 +57,19 @@ export async function runPlugin(context: Context) { let commentToPost; try { const response = await askQuestion(context, question); - const { answer, tokenUsage } = response; + const { answer, tokenUsage, groundTruths } = response; if (!answer) { throw logger.error(`No answer from OpenAI`); } logger.info(`Answer: ${answer}`, { tokenUsage }); - const tokens = 
`\n\n`; - commentToPost = answer + tokens; + + const metadata = { + groundTruths, + tokenUsage, + }; + + const metadataString = createStructuredMetadata("LLM Ground Truths and Token Usage", logger.info(`Answer: ${answer}`, { metadata })); + commentToPost = answer + metadataString; } catch (err) { let errorMessage; if (err instanceof LogReturn) { @@ -81,3 +87,30 @@ export async function runPlugin(context: Context) { function sanitizeMetadata(obj: LogReturn["metadata"]): string { return JSON.stringify(obj, null, 2).replace(//g, ">").replace(/--/g, "--"); } + +function createStructuredMetadata(header: string | undefined, logReturn: LogReturn) { + let logMessage, metadata; + if (logReturn) { + logMessage = logReturn.logMessage; + metadata = logReturn.metadata; + } + + const jsonPretty = sanitizeMetadata(metadata); + const stackLine = new Error().stack?.split("\n")[2] ?? ""; + const caller = stackLine.match(/at (\S+)/)?.[1] ?? ""; + const ubiquityMetadataHeader = `\n\n"].join("\n"); + + if (logMessage?.type === "fatal") { + // if the log message is fatal, then we want to show the metadata + metadataSerialized = [metadataSerializedVisible, metadataSerializedHidden].join("\n"); + } else { + // otherwise we want to hide it + metadataSerialized = metadataSerializedHidden; + } + + return metadataSerialized; +} diff --git a/src/types/llm.d.ts b/src/types/llm.d.ts deleted file mode 100644 index 5bfaa19..0000000 --- a/src/types/llm.d.ts +++ /dev/null @@ -1,19 +0,0 @@ -export type StreamlinedComment = { - id: number; - user?: string; - body?: string; - org: string; - repo: string; - issueUrl: string; - specOrBody?: { - html: string; - text: string; - }; -}; - -export type StreamlinedComments = { - issueNumber: number; - repo: string; - org: string; - comments: StreamlinedComment[]; -}; diff --git a/src/types/llm.ts b/src/types/llm.ts new file mode 100644 index 0000000..f01a70d --- /dev/null +++ b/src/types/llm.ts @@ -0,0 +1,53 @@ +import { GROUND_TRUTHS_SYSTEM_MESSAGES } from 
"../handlers/ground-truths/prompts"; + +export type ModelApplications = "code-review" | "chat-bot"; + +type ChatBotAppParams = { + languages: [string, number][]; + dependencies: Record<string, string>; + devDependencies: Record<string, string>; +}; + +type CodeReviewAppParams = { + taskSpecification: string; +}; + +export type AppParamsHelper<TApp extends ModelApplications> = TApp extends "code-review" + ? CodeReviewAppParams + : TApp extends "chat-bot" + ? ChatBotAppParams + : never; + +export type CompletionsModelHelper<TApp extends ModelApplications> = TApp extends "code-review" ? "gpt-4o" : TApp extends "chat-bot" ? "o1-mini" : never; + +export type GroundTruthsSystemMessage<TApp extends ModelApplications> = TApp extends "code-review" + ? (typeof GROUND_TRUTHS_SYSTEM_MESSAGES)["code-review"] + : TApp extends "chat-bot" + ? (typeof GROUND_TRUTHS_SYSTEM_MESSAGES)["chat-bot"] + : never; + +export type GroundTruthsSystemMessageTemplate = { + truthRules: string[]; + example: string[]; + conditions?: string[]; +}; + +export type StreamlinedComment = { + id: number; + user?: string; + body?: string; + org: string; + repo: string; + issueUrl: string; + specOrBody?: { + html: string; + text: string; + }; +}; + +export type StreamlinedComments = { + issueNumber: number; + repo: string; + org: string; + comments: StreamlinedComment[]; +}; diff --git a/src/types/typeguards.ts b/src/types/typeguards.ts new file mode 100644 index 0000000..c582d42 --- /dev/null +++ b/src/types/typeguards.ts @@ -0,0 +1,9 @@ +import { AppParamsHelper } from "./llm"; + +export function chatBotPayloadTypeguard(payload: unknown): payload is AppParamsHelper<"chat-bot"> { + return typeof payload === "object" && payload !== null && "languages" in payload && "dependencies" in payload; +} + +export function codeReviewPayloadTypeguard(payload: unknown): payload is AppParamsHelper<"code-review"> { + return typeof payload === "object" && payload !== null && "taskSpecification" in payload && "codeReviewModelPrompt" in payload; +} diff --git a/tests/__mocks__/handlers.ts b/tests/__mocks__/handlers.ts index 20503d9..be7ba62 100644 
--- a/tests/__mocks__/handlers.ts +++ b/tests/__mocks__/handlers.ts @@ -7,7 +7,7 @@ import issueTemplate from "./issue-template"; */ export const handlers = [ http.post("https://api.openai.com/v1/chat/completions", () => { - const answer = `This is a mock answer for the chat`; + const answer = `${JSON.stringify(["This is a mock response from OpenAI"])}`; return HttpResponse.json({ usage: { @@ -85,4 +85,16 @@ export const handlers = [ db.pull.findFirst({ where: { owner: { equals: owner as string }, repo: { equals: repo as string }, number: { equals: Number(pullNumber) } } }) ) ), + http.get("https://api.github.com/repos/:owner/:repo/languages", ({ params: { owner, repo } }) => + HttpResponse.json(db.repo.findFirst({ where: { owner: { login: { equals: owner as string } }, name: { equals: repo as string } } })) + ), + http.get("https://api.github.com/repos/:owner/:repo/contents/:path", () => + HttpResponse.json({ + type: "file", + encoding: "base64", + size: 5362, + name: "README.md", + content: Buffer.from(JSON.stringify({ content: "This is a mock README file" })).toString("base64"), + }) + ), ]; diff --git a/tests/main.test.ts b/tests/main.test.ts index 57c0e72..30e76b6 100644 --- a/tests/main.test.ts +++ b/tests/main.test.ts @@ -18,6 +18,7 @@ const TEST_SLASH_COMMAND = "@UbiquityOS what is pi?"; const LOG_CALLER = "_Logs."; const ISSUE_ID_2_CONTENT = "More context here #2"; const ISSUE_ID_3_CONTENT = "More context here #3"; +const MOCK_ANSWER = "This is a mock answer for the chat"; type Comment = { id: number; @@ -61,7 +62,7 @@ describe("Ask plugin tests", () => { expect(res).toBeDefined(); - expect(res?.answer).toBe("This is a mock answer for the chat"); + expect(res?.answer).toBe(MOCK_ANSWER); }); it("should not ask GPT a question if comment is from a bot", async () => { @@ -106,7 +107,6 @@ describe("Ask plugin tests", () => { createComments([transformCommentTemplate(1, 1, TEST_QUESTION, "ubiquity", "test-repo", true)]); await runPlugin(ctx); - 
expect(infoSpy).toHaveBeenCalledTimes(3); expect(infoSpy).toHaveBeenNthCalledWith(1, `Asking question: @UbiquityOS ${TEST_QUESTION}`); expect(infoSpy).toHaveBeenNthCalledWith(3, "Answer: This is a mock answer for the chat", { caller: LOG_CALLER, @@ -130,8 +130,6 @@ describe("Ask plugin tests", () => { await runPlugin(ctx); - expect(infoSpy).toHaveBeenCalledTimes(3); - expect(infoSpy).toHaveBeenNthCalledWith(1, `Asking question: @UbiquityOS ${TEST_QUESTION}`); const prompt = `=== Current Issue #1 Specification === ubiquity/test-repo/1 === @@ -395,7 +393,8 @@ function createContext(body = TEST_SLASH_COMMAND) { completions: { createCompletion: async (): Promise => { return { - answer: "This is a mock answer for the chat", + answer: MOCK_ANSWER, + groundTruths: [MOCK_ANSWER], tokenUsage: { input: 1000, output: 150, @@ -403,6 +402,9 @@ function createContext(body = TEST_SLASH_COMMAND) { }, }; }, + createGroundTruthCompletion: async (): Promise => { + return `["${MOCK_ANSWER}"]`; + }, }, }, }, diff --git a/yarn.lock b/yarn.lock index 8dccaee..bb77c02 100644 --- a/yarn.lock +++ b/yarn.lock @@ -6468,10 +6468,10 @@ typescript-eslint@7.13.1: "@typescript-eslint/parser" "7.13.1" "@typescript-eslint/utils" "7.13.1" -typescript@5.4.5: - version "5.4.5" - resolved "https://registry.yarnpkg.com/typescript/-/typescript-5.4.5.tgz#42ccef2c571fdbd0f6718b1d1f5e6e5ef006f611" - integrity sha512-vcI4UpRgg81oIRUFwR0WSIHKt11nJ7SAVlYNIu+QpqeyXP+gpQJy/Z4+F0aGxSE4MqwjyXvW/TzgkLAx2AGHwQ== +typescript@^5.6.3: + version "5.6.3" + resolved "https://registry.yarnpkg.com/typescript/-/typescript-5.6.3.tgz#5f3449e31c9d94febb17de03cc081dd56d81db5b" + integrity sha512-hjcS1mhfuyi4WW8IWtjP7brDrG2cuDZukyrYrSauoXGNgx0S7zceP07adYkJycEr56BOUTNPzbInooiN3fn1qw== ufo@^1.5.4: version "1.5.4"