Skip to content

Commit

Permalink
v1.3.8
Browse files Browse the repository at this point in the history
Fix gpt-4o-mini & gpt-4o-mini-2024-07-18
Loading branch information
Cainier committed Jul 30, 2024
1 parent f83f200 commit 13752ff
Show file tree
Hide file tree
Showing 6 changed files with 40 additions and 25 deletions.
2 changes: 1 addition & 1 deletion dist/index.d.ts
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
import { Tiktoken } from 'js-tiktoken';
import { TokenPrice } from './tokenPrice';
import type { Tiktoken } from 'js-tiktoken';
import type { supportModelType } from './pricing';
export type { supportModelType };
interface MessageItem {
Expand Down
9 changes: 8 additions & 1 deletion dist/index.js
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,14 @@ class GPTTokens extends tokenPrice_1.TokenPrice {
const modelEncodingCache = GPTTokens.modelEncodingCache;
if (!modelEncodingCache[model]) {
try {
modelEncodingCache[model] = (0, js_tiktoken_1.encodingForModel)(model);
let jsTikTokenSupportModel;
if (model === 'gpt-4o-mini' || model === 'gpt-4o-mini-2024-07-18') {
jsTikTokenSupportModel = 'gpt-4o';
}
else {
jsTikTokenSupportModel = model;
}
modelEncodingCache[model] = (0, js_tiktoken_1.encodingForModel)(jsTikTokenSupportModel);
}
catch (e) {
console.error('Model not found. Using cl100k_base encoding.');
Expand Down
4 changes: 2 additions & 2 deletions package-lock.json

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 1 addition & 1 deletion package.json
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
{
"name": "gpt-tokens",
"version": "1.3.7",
"version": "1.3.8",
"description": "Calculate the token consumption and amount of openai gpt message",
"keywords": [
"gpt",
Expand Down
41 changes: 25 additions & 16 deletions src/index.ts
Original file line number Diff line number Diff line change
@@ -1,14 +1,15 @@
import { Tiktoken } from 'js-tiktoken'
import { encodingForModel, getEncoding } from 'js-tiktoken'
import { promptTokensEstimate } from 'openai-chat-tokens'
import { TokenPrice } from './tokenPrice'
import type { Tiktoken } from 'js-tiktoken'
import type { TiktokenModel } from 'js-tiktoken'
import type { supportModelType } from './pricing'

export type { supportModelType }

interface MessageItem {
name? : string
role : 'system' | 'user' | 'assistant'
name?: string
role: 'system' | 'user' | 'assistant'
content: string
}

Expand All @@ -19,24 +20,32 @@ export class GPTTokens extends TokenPrice {
const modelEncodingCache = GPTTokens.modelEncodingCache
if (!modelEncodingCache[model]) {
try {
modelEncodingCache[model] = encodingForModel(model as Parameters<typeof encodingForModel>[0])
let jsTikTokenSupportModel: TiktokenModel

if (model === 'gpt-4o-mini' || model === 'gpt-4o-mini-2024-07-18') {
jsTikTokenSupportModel = 'gpt-4o'
} else {
jsTikTokenSupportModel = model
}

modelEncodingCache[model] = encodingForModel(jsTikTokenSupportModel)
} catch (e) {
console.error('Model not found. Using cl100k_base encoding.')

modelEncodingCache[model] = getEncoding('cl100k_base')
}
}

return modelEncodingCache[model]!
}

constructor (options: {
model? : supportModelType
model?: supportModelType
fineTuneModel?: string
messages? : GPTTokens['messages']
training? : GPTTokens['training']
tools? : GPTTokens['tools']
debug? : boolean
messages?: GPTTokens['messages']
training?: GPTTokens['training']
tools?: GPTTokens['tools']
debug?: boolean
}) {
super()

Expand Down Expand Up @@ -95,16 +104,16 @@ export class GPTTokens extends TokenPrice {
public readonly tools?: {
type: 'function'
function: {
name : string
name: string
description?: string
parameters : Record<string, unknown>
parameters: Record<string, unknown>
}
} []

// Used USD
public get usedUSD () {
if (this.training) return this.trainPrice(this.model, this.usedTokens)
if (this.tools) return this.inputPrice(this.model, this.usedTokens)
if (this.tools) return this.inputPrice(this.model, this.usedTokens)

return this.totalPrice(this.fineTuneModel
? `ft:${this.model}`
Expand Down Expand Up @@ -184,9 +193,9 @@ export class GPTTokens extends TokenPrice {
* @throws If the model is not supported.
*/
private static num_tokens_from_messages (messages: MessageItem [], model: supportModelType) {
let encoding! : Tiktoken
let encoding!: Tiktoken
let tokens_per_message!: number
let tokens_per_name ! : number
let tokens_per_name !: number

let num_tokens = 0

Expand Down
7 changes: 3 additions & 4 deletions tests/index.js
Original file line number Diff line number Diff line change
Expand Up @@ -3,9 +3,9 @@ const OpenAI = require('openai')
const { GPTTokens } = require('../dist/index')

const [
apiKey = process.env.OPENAI_API_KEY,
fineTuneModel = process.env.FINE_TUNE_MODEL,
] = process.argv.slice(2)
apiKey = process.env.OPENAI_API_KEY,
fineTuneModel = process.env.FINE_TUNE_MODEL,
] = process.argv.slice(2)

if (!apiKey) {
console.error('No API key provided. Ignoring test.')
Expand Down Expand Up @@ -110,7 +110,6 @@ function testPerformance(messages) {
console.time('GPTTokens')

const usageInfo = new GPTTokens({
plus : false,
model: 'gpt-3.5-turbo-0613',
messages,
})
Expand Down

0 comments on commit 13752ff

Please sign in to comment.