diff --git a/.github/workflows/openai-review.yml b/.github/workflows/openai-review.yml
index c35e3659..c57e9883 100644
--- a/.github/workflows/openai-review.yml
+++ b/.github/workflows/openai-review.yml
@@ -3,9 +3,10 @@ name: OpenAI Reviewer
permissions:
contents: read
pull-requests: write
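+ # Lets google-github-actions/auth mint a GitHub OIDC token for Workload
+ # Identity Federation (see docs/oidc.sh); not needed for the
+ # credentials_json flow used below.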
+ id-token: 'write'
on:
- pull_request_target:
+ pull_request:
types: [opened, synchronize, reopened]
pull_request_review_comment:
types: [created]
@@ -21,19 +22,20 @@ jobs:
review:
runs-on: ubuntu-latest
steps:
- - uses: actions/checkout@v3
- with:
- repository: ${{github.event.pull_request.head.repo.full_name}}
- ref: ${{github.event.pull_request.head.ref}}
- submodules: false
- - uses: ./
- env:
- GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
+ - uses: actions/checkout@v4
+
+ - name: Authenticate to Google Cloud
+ uses: google-github-actions/auth@v2
with:
- debug: true
- review_comment_lgtm: false
- openai_heavy_model: gpt-4
- path_filters: |
- !dist/**
- !**/*.lock
+ # workload_identity_provider: projects/636924443032/locations/global/workloadIdentityPools/github-pool/providers/github-provider
+ credentials_json: ${{ secrets.GOOGLE_CREDENTIALS }}
+ project_id: sandbox-toga4-vertexai
+
+ # - uses: ./
+ # env:
+ # GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ # with:
+ # debug: true
+ # language: ja-JP
+ # vertexai_project_id: sandbox-toga4-vertexai
+ # vertexai_location: asia-northeast1
diff --git a/LICENSE b/LICENSE
index ba3c575b..4aab6a68 100644
--- a/LICENSE
+++ b/LICENSE
@@ -2,6 +2,7 @@ The MIT License (MIT)
Copyright (c) 2023 FluxNinja, Inc.
Copyright (c) 2023 Tao He
+Copyright (c) 2023 toga4
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/action.yml b/action.yml
index 8c96b0ba..f195b3c2 100644
--- a/action.yml
+++ b/action.yml
@@ -3,7 +3,7 @@ description: 'AI-based PR Reviewer & Summarizer with Chat Capabilities'
branding:
icon: 'git-merge'
color: 'orange'
-author: 'CodeRabbit LLC'
+author: 'toga4'
inputs:
debug:
required: false
@@ -144,46 +144,57 @@ inputs:
required: false
description: 'Disable release notes'
default: 'false'
- openai_base_url:
+ # vertexai_base_url:
+ # required: false
+ # description: 'The url of the Vertex AI api interface.'
+ # default: 'https://us-central1-aiplatform.googleapis.com'
+ vertexai_project_id:
+ required: true
+ description: 'Project ID for Vertex AI'
+ vertexai_location:
required: false
- description: 'The url of the openai api interface.'
- default: 'https://api.openai.com/v1'
- openai_light_model:
+ description: 'Location for Vertex AI'
+ default: 'us-central1'
+ vertexai_light_model:
required: false
description:
'Model to use for simple tasks like summarizing diff on a file.'
- default: 'gpt-3.5-turbo'
- openai_heavy_model:
+ default: 'codechat-bison'
+ vertexai_heavy_model:
required: false
description: 'Model to use for complex tasks such as code reviews.'
- default: 'gpt-4'
- openai_model_temperature:
+ default: 'codechat-bison-32k'
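+ # The three knobs below are passed through to the model: temperature
+ # (0.0-1.0) scales randomness, top_k caps the candidate token pool, and
+ # top_p is the nucleus-sampling probability mass.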
+ vertexai_model_temperature:
required: false
- description: 'Temperature for GPT model'
- default: '0.05'
- openai_retries:
+ description: 'Temperature for Vertex AI model'
+ default: '0.9'
+ vertexai_model_top_k:
+ required: false
+ description: 'Top K for Vertex AI model'
+ default: '32'
+ vertexai_model_top_p:
+ required: false
+ description: 'Top P for Vertex AI model'
+ default: '1.0'
+ vertexai_retries:
required: false
description:
- 'How many times to retry OpenAI API in case of timeouts or errors?'
+ 'How many times to retry Vertex AI API in case of timeouts or errors?'
default: '5'
- openai_timeout_ms:
- required: false
- description: 'Timeout for OpenAI API call in millis'
- default: '360000'
- openai_concurrency_limit:
+ vertexai_concurrency_limit:
required: false
- description: 'How many concurrent API calls to make to OpenAI servers?'
- default: '6'
+ description: 'How many concurrent API calls to make to Vertex AI servers?'
+ default: '1'
github_concurrency_limit:
required: false
description: 'How many concurrent API calls to make to GitHub?'
default: '6'
system_message:
required: false
- description: 'System message to be sent to OpenAI'
+ description: 'System message to be sent to Vertex AI'
default: |
- You are `@coderabbitai` (aka `github-actions[bot]`), a language model
- trained by OpenAI. Your purpose is to act as a highly experienced
+ You are `/aireviewer` (aka `github-actions[bot]`), a language model
+ hosted on Google Vertex AI. Your purpose is to act as a highly experienced
software engineer and provide a thorough review of the code hunks
and suggest code snippets to improve key areas such as:
- Logic
@@ -202,6 +213,11 @@ inputs:
comments/documentation. Identify and resolve significant
concerns to improve overall code quality while deliberately
disregarding minor issues.
+ reply_for_system_message:
+ required: false
+ description: 'The first reply to the system message, sent on behalf of the Vertex AI model'
+ default: |
+ Got it. I'll review the code and provide a summary. Let's get started.
summarize:
required: false
description: 'The prompt for final summarization response'
@@ -237,7 +253,7 @@ inputs:
bot_icon:
required: false
description: 'The icon for the bot'
- default: ''
+ default: '🤖'
runs:
- using: 'node16'
+ using: 'node20'
main: 'dist/index.js'
diff --git a/docs/oidc.sh b/docs/oidc.sh
new file mode 100644
index 00000000..6b57c6eb
--- /dev/null
+++ b/docs/oidc.sh
@@ -0,0 +1,56 @@
+#!/usr/bin/env bash
+set -euo pipefail
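+
+# One-time bootstrap: provisions a service account and a GitHub Workload
+# Identity Federation pool/provider so the Action can call Vertex AI without
+# storing long-lived JSON keys. Assumes gcloud is authenticated and a default
+# project is configured.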
+
+PROJECT_ID=$(gcloud config get-value project)
+SERVICE_ACCOUNT_NAME="github-action"
+SERVICE_ACCOUNT="${SERVICE_ACCOUNT_NAME}@${PROJECT_ID}.iam.gserviceaccount.com"
+WORKLOAD_IDENTITY_POOL="github-pool"
+WORKLOAD_IDENTITY_PROVIDER="github-provider"
+GIT_USER=$(git config user.name)
+REPO=$(git remote get-url origin | perl -pe 's/\.git$//' | perl -pe 's;.+[:/]([^/]+/[^/]+)$;$1;')
+
+echo "PROJECT_ID=${PROJECT_ID}
+SERVICE_ACCOUNT_NAME=${SERVICE_ACCOUNT_NAME}
+SERVICE_ACCOUNT=${SERVICE_ACCOUNT}
+WORKLOAD_IDENTITY_POOL=${WORKLOAD_IDENTITY_POOL}
+WORKLOAD_IDENTITY_PROVIDER=${WORKLOAD_IDENTITY_PROVIDER}
+GIT_USER=${GIT_USER}
+REPO=${REPO}"
+
+gcloud iam service-accounts create "${SERVICE_ACCOUNT_NAME}" \
+ --project "${PROJECT_ID}"
+
+gcloud projects add-iam-policy-binding "${PROJECT_ID}" \
+ --role="roles/aiplatform.user" \
+ --member="serviceAccount:${SERVICE_ACCOUNT}"
+
+gcloud iam workload-identity-pools create "${WORKLOAD_IDENTITY_POOL}" \
+ --project="${PROJECT_ID}" \
+ --location="global" \
+ --display-name="GitHub Actions Pool"
+
+WORKLOAD_IDENTITY_POOL_ID=$(gcloud iam workload-identity-pools describe "${WORKLOAD_IDENTITY_POOL}" \
+ --project="${PROJECT_ID}" \
+ --location="global" \
+ --format="value(name)")
+
+echo "WORKLOAD_IDENTITY_POOL_ID=${WORKLOAD_IDENTITY_POOL_ID}"
+
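+# Map GitHub OIDC token claims to Google attributes and restrict the provider
+# to repositories owned by this account. Note the assumption that
+# `git config user.name` matches the GitHub repository owner; adjust GIT_USER
+# above if it does not.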
+gcloud iam workload-identity-pools providers create-oidc "${WORKLOAD_IDENTITY_PROVIDER}" \
+ --project="${PROJECT_ID}" \
+ --location="global" \
+ --workload-identity-pool="${WORKLOAD_IDENTITY_POOL}" \
+ --display-name="My GitHub repo Provider" \
+ --attribute-mapping="google.subject=assertion.sub,attribute.actor=assertion.actor,attribute.repository=assertion.repository,attribute.repository_owner=assertion.repository_owner" \
+ --attribute-condition="attribute.repository_owner=='${GIT_USER}'" \
+ --issuer-uri="https://token.actions.githubusercontent.com"
+
+gcloud iam workload-identity-pools providers describe "${WORKLOAD_IDENTITY_PROVIDER}" \
+ --project="${PROJECT_ID}" \
+ --location="global" \
+ --workload-identity-pool="${WORKLOAD_IDENTITY_POOL}" \
+ --format="value(name)"
+
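+# Allow workflows from this repository (and only this repository) to
+# impersonate the service account through the pool.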
+gcloud iam service-accounts add-iam-policy-binding "${SERVICE_ACCOUNT}" \
+ --project="${PROJECT_ID}" \
+ --role="roles/iam.workloadIdentityUser" \
+ --member="principalSet://iam.googleapis.com/${WORKLOAD_IDENTITY_POOL_ID}/attribute.repository/${REPO}"
diff --git a/package-lock.json b/package-lock.json
index 1bccefde..d1aae0d4 100644
--- a/package-lock.json
+++ b/package-lock.json
@@ -1,11 +1,11 @@
{
- "name": "openai-pr-reviewer",
+ "name": "vertexai-pr-reviewer",
"version": "0.0.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
- "name": "openai-pr-reviewer",
+ "name": "vertexai-pr-reviewer",
"version": "0.0.0",
"license": "MIT",
"dependencies": {
diff --git a/package.json b/package.json
index abf29881..963529f6 100644
--- a/package.json
+++ b/package.json
@@ -1,8 +1,8 @@
{
- "name": "openai-pr-reviewer",
+ "name": "vertexai-pr-reviewer",
"version": "0.0.0",
"private": true,
- "description": "OpenAI-based PR Reviewer and Summarizer.",
+ "description": "Vertex AI based PR Reviewer and Summarizer.",
"main": "lib/main.js",
"scripts": {
"build": "cp node_modules/@dqbd/tiktoken/tiktoken_bg.wasm dist/tiktoken_bg.wasm && tsc",
@@ -16,7 +16,7 @@
},
"repository": {
"type": "git",
- "url": "git+https://github.com/fluxninja/openai-pr-reviewer.git"
+ "url": "git+https://github.com/toga4/vertexai-pr-reviewer.git"
},
"keywords": [
"actions",
diff --git a/src/bot.ts b/src/bot.ts
index 4a5a1e4b..f85e714c 100644
--- a/src/bot.ts
+++ b/src/bot.ts
@@ -1,15 +1,6 @@
import './fetch-polyfill'
-
-import {info, setFailed, warning} from '@actions/core'
-import {
- ChatGPTAPI,
- ChatGPTError,
- ChatMessage,
- SendMessageOptions
- // eslint-disable-next-line import/no-unresolved
-} from 'chatgpt'
-import pRetry from 'p-retry'
-import {OpenAIOptions, Options} from './options'
+import {info, warning} from '@actions/core'
+import {VertexAIOptions, Options} from './options'
// define type to save parentMessageId and conversationId
export interface Ids {
@@ -18,111 +9,35 @@ export interface Ids {
}
export class Bot {
- private readonly api: ChatGPTAPI | null = null // not free
-
+ private readonly api: null
private readonly options: Options
- constructor(options: Options, openaiOptions: OpenAIOptions) {
+ constructor(options: Options, vertexaiOptions: VertexAIOptions) {
this.options = options
- if (process.env.OPENAI_API_KEY) {
- const currentDate = new Date().toISOString().split('T')[0]
- const systemMessage = `${options.systemMessage}
-Knowledge cutoff: ${openaiOptions.tokenLimits.knowledgeCutOff}
-Current date: ${currentDate}
-
-IMPORTANT: Entire response must be in the language with ISO code: ${options.language}
-`
-
- this.api = new ChatGPTAPI({
- apiBaseUrl: options.apiBaseUrl,
- systemMessage,
- apiKey: process.env.OPENAI_API_KEY,
- apiOrg: process.env.OPENAI_API_ORG ?? undefined,
- debug: options.debug,
- maxModelTokens: openaiOptions.tokenLimits.maxTokens,
- maxResponseTokens: openaiOptions.tokenLimits.responseTokens,
- completionParams: {
- temperature: options.openaiModelTemperature,
- model: openaiOptions.model
- }
- })
- } else {
- const err =
- "Unable to initialize the OpenAI API, both 'OPENAI_API_KEY' environment variable are not available"
- throw new Error(err)
+ this.api = null // TODO
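+ // A sketch of the eventual wiring, assuming the @google-cloud/vertexai
+ // SDK were added as a dependency (hypothetical; it is not one yet):
+ //   const vertex = new VertexAI({
+ //     project: options.vertexaiProjectID,
+ //     location: options.vertexaiLocation
+ //   })
+ //   this.api = vertex.getGenerativeModel({model: vertexaiOptions.model})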
+ if (options.debug) {
+ const dump = JSON.stringify({options, vertexaiOptions}, null, 2)
+ info(`vertexai options: ${dump}`)
}
}
- chat = async (message: string, ids: Ids): Promise<[string, Ids]> => {
- let res: [string, Ids] = ['', {}]
+ chat = async (message: string): Promise<string> => {
try {
- res = await this.chat_(message, ids)
- return res
+ return await this.chat_(message)
} catch (e: unknown) {
- if (e instanceof ChatGPTError) {
+ if (e instanceof Error) {
warning(`Failed to chat: ${e}, backtrace: ${e.stack}`)
+ } else {
+ warning(`Failed to chat: ${e}`)
}
- return res
+ return ''
}
}
- private readonly chat_ = async (
- message: string,
- ids: Ids
- ): Promise<[string, Ids]> => {
- // record timing
- const start = Date.now()
+ private readonly chat_ = async (message: string): Promise<string> => {
if (!message) {
- return ['', {}]
- }
-
- let response: ChatMessage | undefined
-
- if (this.api != null) {
- const opts: SendMessageOptions = {
- timeoutMs: this.options.openaiTimeoutMS
- }
- if (ids.parentMessageId) {
- opts.parentMessageId = ids.parentMessageId
- }
- try {
- response = await pRetry(() => this.api!.sendMessage(message, opts), {
- retries: this.options.openaiRetries
- })
- } catch (e: unknown) {
- if (e instanceof ChatGPTError) {
- info(
- `response: ${response}, failed to send message to openai: ${e}, backtrace: ${e.stack}`
- )
- }
- }
- const end = Date.now()
- info(`response: ${JSON.stringify(response)}`)
- info(
- `openai sendMessage (including retries) response time: ${
- end - start
- } ms`
- )
- } else {
- setFailed('The OpenAI API is not initialized')
- }
- let responseText = ''
- if (response != null) {
- responseText = response.text
- } else {
- warning('openai response is null')
- }
- // remove the prefix "with " in the response
- if (responseText.startsWith('with ')) {
- responseText = responseText.substring(5)
- }
- if (this.options.debug) {
- info(`openai responses: ${responseText}`)
- }
- const newIds: Ids = {
- parentMessageId: response?.id,
- conversationId: response?.conversationId
+ return ''
}
- return [responseText, newIds]
+ return ''
}
}
diff --git a/src/commenter.ts b/src/commenter.ts
index fc14e70f..4a3d961b 100644
--- a/src/commenter.ts
+++ b/src/commenter.ts
@@ -7,40 +7,40 @@ import {octokit} from './octokit'
const context = github_context
const repo = context.repo
-export const COMMENT_GREETING = `${getInput('bot_icon')} CodeRabbit`
+export const COMMENT_GREETING = `${getInput('bot_icon')} AI Reviewer`
export const COMMENT_TAG =
- '<!-- This is an auto-generated comment by OSS CodeRabbit -->'
+ '<!-- This is an auto-generated comment by AI Reviewer -->'
export const COMMENT_REPLY_TAG =
- '<!-- This is an auto-generated reply by OSS CodeRabbit -->'
+ '<!-- This is an auto-generated reply by AI Reviewer -->'
export const SUMMARIZE_TAG =
- '<!-- This is an auto-generated comment: summarize by OSS CodeRabbit -->'
+ '<!-- This is an auto-generated comment: summarize by AI Reviewer -->'
export const IN_PROGRESS_START_TAG =
- '<!-- This is an auto-generated comment: summarize review in progress by OSS CodeRabbit -->'
+ '<!-- This is an auto-generated comment: summarize review in progress by AI Reviewer -->'
export const IN_PROGRESS_END_TAG =
- '<!-- end of auto-generated comment: summarize review in progress by OSS CodeRabbit -->'
+ '<!-- end of auto-generated comment: summarize review in progress by AI Reviewer -->'
export const DESCRIPTION_START_TAG =
- '<!-- This is an auto-generated comment: release notes by OSS CodeRabbit -->'
+ '<!-- This is an auto-generated comment: release notes by AI Reviewer -->'
export const DESCRIPTION_END_TAG =
- '<!-- end of auto-generated comment: release notes by OSS CodeRabbit -->'
+ '<!-- end of auto-generated comment: release notes by AI Reviewer -->'
-export const RAW_SUMMARY_START_TAG = `<!-- This is an auto-generated comment: raw summary by OSS CodeRabbit -->
+export const RAW_SUMMARY_START_TAG = `<!-- This is an auto-generated comment: raw summary by AI Reviewer -->
<!--
`
export const RAW_SUMMARY_END_TAG = `-->
-<!-- end of auto-generated comment: raw summary by OSS CodeRabbit -->`
+<!-- end of auto-generated comment: raw summary by AI Reviewer -->`
-export const SHORT_SUMMARY_START_TAG = `<!-- This is an auto-generated comment: short summary by OSS CodeRabbit -->
+export const SHORT_SUMMARY_START_TAG = `<!-- This is an auto-generated comment: short summary by AI Reviewer -->
<!--
`
export const SHORT_SUMMARY_END_TAG = `-->
-<!-- end of auto-generated comment: short summary by OSS CodeRabbit -->`
+<!-- end of auto-generated comment: short summary by AI Reviewer -->`
export const COMMIT_ID_START_TAG = '<!-- commit_ids_reviewed_start -->'
export const COMMIT_ID_END_TAG = '<!-- commit_ids_reviewed_end -->'
diff --git a/src/limits.ts b/src/limits.ts
index aca807f6..d9ba14c0 100644
--- a/src/limits.ts
+++ b/src/limits.ts
@@ -2,19 +2,17 @@ export class TokenLimits {
maxTokens: number
requestTokens: number
responseTokens: number
- knowledgeCutOff: string
- constructor(model = 'gpt-3.5-turbo') {
- this.knowledgeCutOff = '2021-09-01'
- if (model === 'gpt-4-32k') {
- this.maxTokens = 32600
- this.responseTokens = 4000
- } else if (model === 'gpt-3.5-turbo-16k') {
- this.maxTokens = 16300
- this.responseTokens = 3000
- } else if (model === 'gpt-4') {
- this.maxTokens = 8000
- this.responseTokens = 2000
+ constructor(model = 'codechat-bison') {
+ if (model === 'codechat-bison') {
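+ // codechat-bison accepts up to 6,144 input tokens and returns up to 1,024
+ // output tokens, so the overall window is their sum.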
+ this.maxTokens = 6144 + 1024
+ this.responseTokens = 1024
+ } else if (model === 'codechat-bison-32k') {
+ this.maxTokens = 32000
+ this.responseTokens = 8192
+ } else if (model === 'gemini-pro') {
+ this.maxTokens = 32000
+ this.responseTokens = 8192
} else {
this.maxTokens = 4000
this.responseTokens = 1000
diff --git a/src/main.ts b/src/main.ts
index 0b716c48..13915868 100644
--- a/src/main.ts
+++ b/src/main.ts
@@ -6,7 +6,7 @@ import {
warning
} from '@actions/core'
import {Bot} from './bot'
-import {OpenAIOptions, Options} from './options'
+import {VertexAIOptions, Options} from './options'
import {Prompts} from './prompts'
import {codeReview} from './review'
import {handleReviewComment} from './review-comment'
@@ -21,14 +21,18 @@ async function run(): Promise<void> {
getBooleanInput('review_comment_lgtm'),
getMultilineInput('path_filters'),
getInput('system_message'),
- getInput('openai_light_model'),
- getInput('openai_heavy_model'),
- getInput('openai_model_temperature'),
- getInput('openai_retries'),
- getInput('openai_timeout_ms'),
- getInput('openai_concurrency_limit'),
+ getInput('reply_for_system_message'),
+ getInput('vertexai_project_id'),
+ getInput('vertexai_location'),
+ getInput('vertexai_light_model'),
+ getInput('vertexai_heavy_model'),
+ getInput('vertexai_model_temperature'),
+ getInput('vertexai_model_top_k'),
+ getInput('vertexai_model_top_p'),
+ getInput('vertexai_retries'),
+ getInput('vertexai_concurrency_limit'),
getInput('github_concurrency_limit'),
- getInput('openai_base_url'),
+ // getInput('vertexai_base_url'),
getInput('language')
)
@@ -46,12 +50,10 @@ async function run(): Promise {
try {
lightBot = new Bot(
options,
- new OpenAIOptions(options.openaiLightModel, options.lightTokenLimits)
+ new VertexAIOptions(options.vertexaiLightModel, options.lightTokenLimits)
)
} catch (e: any) {
- warning(
- `Skipped: failed to create summary bot, please check your openai_api_key: ${e}, backtrace: ${e.stack}`
- )
+ warning(`Skipped: failed to create summary bot: ${e}, backtrace: ${e.stack}`)
return
}
@@ -59,12 +61,10 @@ async function run(): Promise {
try {
heavyBot = new Bot(
options,
- new OpenAIOptions(options.openaiHeavyModel, options.heavyTokenLimits)
+ new VertexAIOptions(options.vertexaiHeavyModel, options.heavyTokenLimits)
)
} catch (e: any) {
- warning(
- `Skipped: failed to create review bot, please check your openai_api_key: ${e}, backtrace: ${e.stack}`
- )
+ warning(`Skipped: failed to create review bot: ${e}, backtrace: ${e.stack}`)
return
}
@@ -93,7 +93,13 @@ async function run(): Promise {
process
.on('unhandledRejection', (reason, p) => {
- warning(`Unhandled Rejection at Promise: ${reason}, promise is ${p}`)
+ if (reason instanceof Error) {
+ warning(
+ `Unhandled Rejection at Promise: ${reason}, backtrace: ${reason.stack}`
+ )
+ } else {
+ warning(`Unhandled Rejection at Promise: ${reason}, promise is ${p}`)
+ }
})
.on('uncaughtException', (e: any) => {
warning(`Uncaught Exception thrown: ${e}, backtrace: ${e.stack}`)
diff --git a/src/options.ts b/src/options.ts
index 02541975..3c67a907 100644
--- a/src/options.ts
+++ b/src/options.ts
@@ -11,16 +11,20 @@ export class Options {
reviewCommentLGTM: boolean
pathFilters: PathFilter
systemMessage: string
- openaiLightModel: string
- openaiHeavyModel: string
- openaiModelTemperature: number
- openaiRetries: number
- openaiTimeoutMS: number
- openaiConcurrencyLimit: number
+ replyForSystemMessage: string
+ vertexaiProjectID: string
+ vertexaiLocation: string
+ vertexaiLightModel: string
+ vertexaiHeavyModel: string
+ vertexaiModelTemperature: number
+ vertexaiModelTopK: number
+ vertexaiModelTopP: number
+ vertexaiRetries: number
+ vertexaiConcurrencyLimit: number
githubConcurrencyLimit: number
lightTokenLimits: TokenLimits
heavyTokenLimits: TokenLimits
- apiBaseUrl: string
+ // apiBaseUrl: string
language: string
constructor(
@@ -32,14 +36,18 @@ export class Options {
reviewCommentLGTM = false,
pathFilters: string[] | null = null,
systemMessage = '',
- openaiLightModel = 'gpt-3.5-turbo',
- openaiHeavyModel = 'gpt-3.5-turbo',
- openaiModelTemperature = '0.0',
- openaiRetries = '3',
- openaiTimeoutMS = '120000',
- openaiConcurrencyLimit = '6',
+ replyForSystemMessage = '',
+ vertexaiProjectID: string,
+ vertexaiLocation = 'us-central1',
+ vertexaiLightModel = 'gemini-pro',
+ vertexaiHeavyModel = 'gemini-pro',
+ vertexaiModelTemperature = '0.9',
+ vertexaiModelTopK = '32',
+ vertexaiModelTopP = '1.0',
+ vertexaiRetries = '3',
+ vertexaiConcurrencyLimit = '6',
githubConcurrencyLimit = '6',
- apiBaseUrl = 'https://api.openai.com/v1',
+ // apiBaseUrl = 'https://api.vertexai.com/v1',
language = 'en-US'
) {
this.debug = debug
@@ -50,16 +58,20 @@ export class Options {
this.reviewCommentLGTM = reviewCommentLGTM
this.pathFilters = new PathFilter(pathFilters)
this.systemMessage = systemMessage
- this.openaiLightModel = openaiLightModel
- this.openaiHeavyModel = openaiHeavyModel
- this.openaiModelTemperature = parseFloat(openaiModelTemperature)
- this.openaiRetries = parseInt(openaiRetries)
- this.openaiTimeoutMS = parseInt(openaiTimeoutMS)
- this.openaiConcurrencyLimit = parseInt(openaiConcurrencyLimit)
+ this.replyForSystemMessage = replyForSystemMessage
+ this.vertexaiProjectID = vertexaiProjectID
+ this.vertexaiLocation = vertexaiLocation
+ this.vertexaiLightModel = vertexaiLightModel
+ this.vertexaiHeavyModel = vertexaiHeavyModel
+ this.vertexaiModelTemperature = parseFloat(vertexaiModelTemperature)
+ this.vertexaiModelTopK = parseInt(vertexaiModelTopK)
+ this.vertexaiModelTopP = parseFloat(vertexaiModelTopP)
+ this.vertexaiRetries = parseInt(vertexaiRetries)
+ this.vertexaiConcurrencyLimit = parseInt(vertexaiConcurrencyLimit)
this.githubConcurrencyLimit = parseInt(githubConcurrencyLimit)
- this.lightTokenLimits = new TokenLimits(openaiLightModel)
- this.heavyTokenLimits = new TokenLimits(openaiHeavyModel)
- this.apiBaseUrl = apiBaseUrl
+ this.lightTokenLimits = new TokenLimits(vertexaiLightModel)
+ this.heavyTokenLimits = new TokenLimits(vertexaiHeavyModel)
+ // this.apiBaseUrl = apiBaseUrl
this.language = language
}
@@ -73,16 +85,20 @@ export class Options {
info(`review_comment_lgtm: ${this.reviewCommentLGTM}`)
info(`path_filters: ${this.pathFilters}`)
info(`system_message: ${this.systemMessage}`)
- info(`openai_light_model: ${this.openaiLightModel}`)
- info(`openai_heavy_model: ${this.openaiHeavyModel}`)
- info(`openai_model_temperature: ${this.openaiModelTemperature}`)
- info(`openai_retries: ${this.openaiRetries}`)
- info(`openai_timeout_ms: ${this.openaiTimeoutMS}`)
- info(`openai_concurrency_limit: ${this.openaiConcurrencyLimit}`)
+ info(`reply_for_system_message: ${this.replyForSystemMessage}`)
+ info(`vertexai_project_id: ${this.vertexaiProjectID}`)
+ info(`vertexai_location: ${this.vertexaiLocation}`)
+ info(`vertexai_light_model: ${this.vertexaiLightModel}`)
+ info(`vertexai_heavy_model: ${this.vertexaiHeavyModel}`)
+ info(`vertexai_model_temperature: ${this.vertexaiModelTemperature}`)
+ info(`vertexai_model_top_k: ${this.vertexaiModelTopK}`)
+ info(`vertexai_model_top_p: ${this.vertexaiModelTopP}`)
+ info(`vertexai_retries: ${this.vertexaiRetries}`)
+ info(`vertexai_concurrency_limit: ${this.vertexaiConcurrencyLimit}`)
info(`github_concurrency_limit: ${this.githubConcurrencyLimit}`)
info(`summary_token_limits: ${this.lightTokenLimits.string()}`)
info(`review_token_limits: ${this.heavyTokenLimits.string()}`)
- info(`api_base_url: ${this.apiBaseUrl}`)
+ // info(`api_base_url: ${this.apiBaseUrl}`)
info(`language: ${this.language}`)
}
@@ -138,11 +154,11 @@ export class PathFilter {
}
}
-export class OpenAIOptions {
+export class VertexAIOptions {
model: string
tokenLimits: TokenLimits
- constructor(model = 'gpt-3.5-turbo', tokenLimits: TokenLimits | null = null) {
+ constructor(model = 'gemini-pro', tokenLimits: TokenLimits | null = null) {
this.model = model
if (tokenLimits != null) {
this.tokenLimits = tokenLimits
diff --git a/src/review-comment.ts b/src/review-comment.ts
index f42c53db..9f778fbe 100644
--- a/src/review-comment.ts
+++ b/src/review-comment.ts
@@ -17,7 +17,7 @@ import {getTokenCount} from './tokenizer'
// eslint-disable-next-line camelcase
const context = github_context
const repo = context.repo
-const ASK_BOT = '@coderabbitai'
+const ASK_BOT = '/aireviewer'
export const handleReviewComment = async (
heavyBot: Bot,
@@ -172,7 +172,7 @@ export const handleReviewComment = async (
}
}
- const [reply] = await heavyBot.chat(prompts.renderComment(inputs), {})
+ const reply = await heavyBot.chat(prompts.renderComment(inputs))
await commenter.reviewCommentReply(pullNumber, topLevelComment, reply)
}
diff --git a/src/review.ts b/src/review.ts
index 5e7dd9db..f38b3609 100644
--- a/src/review.ts
+++ b/src/review.ts
@@ -22,7 +22,7 @@ import {getTokenCount} from './tokenizer'
const context = github_context
const repo = context.repo
-const ignoreKeyword = '@coderabbitai: ignore'
+const ignoreKeyword = '/aireviewer: ignore'
export const codeReview = async (
lightBot: Bot,
@@ -32,7 +32,7 @@ export const codeReview = async (
): Promise<void> => {
const commenter: Commenter = new Commenter()
- const openaiConcurrencyLimit = pLimit(options.openaiConcurrencyLimit)
+ const vertexaiConcurrencyLimit = pLimit(options.vertexaiConcurrencyLimit)
const githubConcurrencyLimit = pLimit(options.githubConcurrencyLimit)
if (
@@ -337,11 +337,11 @@ ${
// summarize content
try {
- const [summarizeResp] = await lightBot.chat(summarizePrompt, {})
+ const summarizeResp = await lightBot.chat(summarizePrompt)
- if (summarizeResp === '') {
- info('summarize: nothing obtained from openai')
- summariesFailed.push(`${filename} (nothing obtained from openai)`)
+ if (!summarizeResp) {
+ info('summarize: nothing obtained from vertexai')
+ summariesFailed.push(`${filename} (nothing obtained from vertexai)`)
return null
} else {
if (options.reviewSimpleChanges === false) {
@@ -364,8 +364,10 @@ ${
return [filename, summarizeResp, true]
}
} catch (e: any) {
- warning(`summarize: error from openai: ${e as string}`)
- summariesFailed.push(`${filename} (error from openai: ${e as string})})`)
+ warning(`summarize: error from vertexai: ${e as string}`)
+ summariesFailed.push(
+ `${filename} (error from vertexai: ${e as string})`
+ )
return null
}
}
@@ -375,7 +377,7 @@ ${
for (const [filename, fileContent, fileDiff] of filesAndChanges) {
if (options.maxFiles <= 0 || summaryPromises.length < options.maxFiles) {
summaryPromises.push(
- openaiConcurrencyLimit(
+ vertexaiConcurrencyLimit(
async () => await doSummary(filename, fileContent, fileDiff)
)
)
@@ -399,13 +401,12 @@ ${
${filename}: ${summary}
`
}
- // ask chatgpt to summarize the summaries
- const [summarizeResp] = await heavyBot.chat(
- prompts.renderSummarizeChangesets(inputs),
- {}
+ // ask model to summarize the summaries
+ const summarizeResp = await heavyBot.chat(
+ prompts.renderSummarizeChangesets(inputs)
)
- if (summarizeResp === '') {
- warning('summarize: nothing obtained from openai')
+ if (!summarizeResp) {
+ warning('summarize: nothing obtained from vertexai')
} else {
inputs.rawSummary = summarizeResp
}
@@ -413,24 +414,22 @@ ${filename}: ${summary}
}
// final summary
- const [summarizeFinalResponse] = await heavyBot.chat(
- prompts.renderSummarize(inputs),
- {}
+ const summarizeFinalResponse = await heavyBot.chat(
+ prompts.renderSummarize(inputs)
)
- if (summarizeFinalResponse === '') {
- info('summarize: nothing obtained from openai')
+ if (!summarizeFinalResponse) {
+ info('summarize: nothing obtained from vertexai')
}
if (options.disableReleaseNotes === false) {
// final release notes
- const [releaseNotesResponse] = await heavyBot.chat(
- prompts.renderSummarizeReleaseNotes(inputs),
- {}
+ const releaseNotesResponse = await heavyBot.chat(
+ prompts.renderSummarizeReleaseNotes(inputs)
)
- if (releaseNotesResponse === '') {
- info('release notes: nothing obtained from openai')
+ if (!releaseNotesResponse) {
+ info('release notes: nothing obtained from vertexai')
} else {
- let message = '### Summary by CodeRabbit\n\n'
+ let message = '### Summary by AI Reviewer\n\n'
message += releaseNotesResponse
try {
await commenter.updateDescription(
@@ -444,9 +443,8 @@ ${filename}: ${summary}
}
// generate a short summary as well
- const [summarizeShortResponse] = await heavyBot.chat(
- prompts.renderSummarizeShort(inputs),
- {}
+ const summarizeShortResponse = await heavyBot.chat(
+ prompts.renderSummarizeShort(inputs)
)
inputs.shortSummary = summarizeShortResponse
@@ -457,17 +455,6 @@ ${RAW_SUMMARY_END_TAG}
${SHORT_SUMMARY_START_TAG}
${inputs.shortSummary}
${SHORT_SUMMARY_END_TAG}
-
----
-
-<details>
-<summary>Uplevel your code reviews with CodeRabbit Pro</summary>
-
-### CodeRabbit Pro
-
-If you like this project, please support us by purchasing the [Pro version](https://coderabbit.ai). The Pro version has advanced context, superior noise reduction and several proprietary improvements compared to the open source version. Moreover, CodeRabbit Pro is free for open source projects.
-
-</details>
`
statusMsg += `
@@ -619,12 +606,11 @@ ${commentChain}
if (patchesPacked > 0) {
// perform review
try {
- const [response] = await heavyBot.chat(
- prompts.renderReviewFileDiff(ins),
- {}
+ const response = await heavyBot.chat(
+ prompts.renderReviewFileDiff(ins)
)
- if (response === '') {
- info('review: nothing obtained from openai')
+ if (!response) {
+ info('review: nothing obtained from vertexai')
reviewsFailed.push(`${filename} (no response)`)
return
}
@@ -674,7 +660,7 @@ ${commentChain}
for (const [filename, fileContent, , patches] of filesAndChangesReview) {
if (options.maxFiles <= 0 || reviewPromises.length < options.maxFiles) {
reviewPromises.push(
- openaiConcurrencyLimit(async () => {
+ vertexaiConcurrencyLimit(async () => {
await doReview(filename, fileContent, patches)
})
)
@@ -723,16 +709,16 @@ ${
<summary>Tips</summary>
-### Chat with CodeRabbit Bot (\`@coderabbitai\`)
+### Chat with AI Reviewer (\`/aireviewer\`)
- Reply on review comments left by this bot to ask follow-up questions. A review comment is a comment on a diff or a file.
-- Invite the bot into a review comment chain by tagging \`@coderabbitai\` in a reply.
+- Invite the bot into a review comment chain by tagging \`/aireviewer\` in a reply.
### Code suggestions
- The bot may make code suggestions, but please review them carefully before committing since the line number ranges may be misaligned.
- You can edit the comment made by the bot and manually tweak the suggestion if it is slightly off.
### Pausing incremental reviews
-- Add \`@coderabbitai: ignore\` anywhere in the PR description to pause further reviews from the bot.
+- Add \`/aireviewer: ignore\` anywhere in the PR description to pause further reviews from the bot.
`