Skip to content

Commit 40a30ac

Browse files
committed
fix(deps): unify tokenlens via catalog and migrate to v2 API
Divergent tokenlens versions (api/dashboard/shared on ^1.3.1, ai on ^2.0.0-alpha.3) produced workspace-scoped lockfile sub-entries that turbo prune couldn't resolve — breaking Docker builds with "Failed to resolve prod dependency '@tokenlens/fetch' for package 'tokenlens'". Consolidates on ^2.0.0-alpha.3 through a new catalog entry and migrates the two v1 call sites (api usage telemetry, dashboard context components) to v2's sync computeTokenCostsForModel with vercelModels passed as the source — no remote catalog fetches.
1 parent d8532c9 commit 40a30ac

8 files changed

Lines changed: 91 additions & 86 deletions

File tree

apps/api/package.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -53,7 +53,7 @@
5353
"resend": "^4.0.1",
5454
"supermemory": "^4.17.0",
5555
"svix": "^1.84.1",
56-
"tokenlens": "^1.3.1",
56+
"tokenlens": "catalog:",
5757
"zod": "catalog:",
5858
"zod-to-json-schema": "^3.25.1"
5959
}

apps/api/src/lib/usage-telemetry.ts

Lines changed: 35 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,25 @@
11
import type { LanguageModelUsage } from "ai";
2-
import { getUsage } from "tokenlens/helpers";
2+
import type { SourceModel } from "tokenlens";
3+
import { computeTokenCostsForModel } from "tokenlens/helpers";
34
import { vercelModels } from "tokenlens/providers/vercel";
45

6+
type VercelModelId = keyof typeof vercelModels.models;
7+
8+
const lookupModel = (modelId: string): SourceModel | undefined => {
9+
const model = vercelModels.models[modelId as VercelModelId];
10+
return model
11+
? ({ canonical_id: model.id, ...model } as unknown as SourceModel)
12+
: undefined;
13+
};
14+
15+
const toUsage = (usage: LanguageModelUsage) => ({
16+
input_tokens: usage.inputTokens,
17+
output_tokens: usage.outputTokens,
18+
cache_read_tokens: usage.inputTokenDetails?.cacheReadTokens,
19+
cache_write_tokens: usage.inputTokenDetails?.cacheWriteTokens,
20+
reasoning_tokens: usage.outputTokenDetails?.reasoningTokens,
21+
});
22+
523
/**
624
* Best-effort token telemetry for the agent route.
725
*
@@ -53,20 +71,19 @@ export function summarizeAgentUsage(
5371
num(usage.inputTokenDetails?.noCacheTokens) ||
5472
Math.max(0, inputTokens - cacheReadTokens - cacheWriteTokens);
5573

74+
const normalizedUsage = toUsage(usage);
5675
let costModelId = modelId;
57-
let costs = getUsage({
58-
modelId,
59-
usage,
60-
providers: vercelModels,
61-
}).costUSD;
76+
let model = lookupModel(modelId);
77+
let costs = model
78+
? computeTokenCostsForModel({ model, usage: normalizedUsage })
79+
: undefined;
6280

63-
if (costs?.totalUSD === undefined) {
81+
if (costs === undefined || costs.totalTokenCostUSD === 0) {
6482
costModelId = FALLBACK_MODEL_ID;
65-
costs = getUsage({
66-
modelId: FALLBACK_MODEL_ID,
67-
usage,
68-
providers: vercelModels,
69-
}).costUSD;
83+
model = lookupModel(FALLBACK_MODEL_ID);
84+
costs = model
85+
? computeTokenCostsForModel({ model, usage: normalizedUsage })
86+
: undefined;
7087
}
7188

7289
return {
@@ -77,12 +94,12 @@ export function summarizeAgentUsage(
7794
cache_read_tokens: cacheReadTokens,
7895
cache_write_tokens: cacheWriteTokens,
7996
reasoning_tokens: num(usage.outputTokenDetails?.reasoningTokens),
80-
cost_input_usd: num(costs?.inputUSD),
81-
cost_output_usd: num(costs?.outputUSD),
82-
cost_total_usd: num(costs?.totalUSD),
83-
cost_cache_read_usd: num(costs?.cacheReadUSD),
84-
cost_cache_write_usd: num(costs?.cacheWriteUSD),
85-
cost_reasoning_usd: num(costs?.reasoningUSD),
97+
cost_input_usd: num(costs?.inputTokenCostUSD),
98+
cost_output_usd: num(costs?.outputTokenCostUSD),
99+
cost_total_usd: num(costs?.totalTokenCostUSD),
100+
cost_cache_read_usd: num(costs?.cacheReadTokenCostUSD),
101+
cost_cache_write_usd: num(costs?.cacheWriteTokenCostUSD),
102+
cost_reasoning_usd: num(costs?.reasoningTokenCostUSD),
86103
cost_model_id: costModelId,
87104
cost_fallback: costModelId !== modelId,
88105
};

apps/dashboard/components/ai-elements/context.tsx

Lines changed: 45 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,5 @@
11
"use client";
22

3-
import type { LanguageModelUsage } from "ai";
4-
import { type ComponentProps, createContext, useContext } from "react";
5-
import { getUsage } from "tokenlens";
63
import { Button } from "@/components/ui/button";
74
import {
85
HoverCard,
@@ -11,6 +8,20 @@ import {
118
} from "@/components/ui/hover-card";
129
import { Progress } from "@/components/ui/progress";
1310
import { cn } from "@/lib/utils";
11+
import type { LanguageModelUsage } from "ai";
12+
import { type ComponentProps, createContext, useContext } from "react";
13+
import type { SourceModel } from "tokenlens";
14+
import { computeTokenCostsForModel } from "tokenlens/helpers";
15+
import { vercelModels } from "tokenlens/providers/vercel";
16+
17+
type VercelModelId = keyof typeof vercelModels.models;
18+
19+
const lookupModel = (modelId: string): SourceModel | undefined => {
20+
const model = vercelModels.models[modelId as VercelModelId];
21+
return model
22+
? ({ canonical_id: model.id, ...model } as unknown as SourceModel)
23+
: undefined;
24+
};
1425

1526
const PERCENT_MAX = 100;
1627
const ICON_RADIUS = 10;
@@ -196,14 +207,15 @@ export const ContextContentFooter = ({
196207
...props
197208
}: ContextContentFooterProps) => {
198209
const { modelId, usage } = useContextValue();
199-
const costUSD = modelId
200-
? getUsage({
201-
modelId,
210+
const model = modelId ? lookupModel(modelId) : undefined;
211+
const costUSD = model
212+
? computeTokenCostsForModel({
213+
model,
202214
usage: {
203-
input: usage?.inputTokens ?? 0,
204-
output: usage?.outputTokens ?? 0,
215+
input_tokens: usage?.inputTokens ?? 0,
216+
output_tokens: usage?.outputTokens ?? 0,
205217
},
206-
}).costUSD?.totalUSD
218+
}).totalTokenCostUSD
207219
: undefined;
208220
const totalCost = new Intl.NumberFormat("en-US", {
209221
style: "currency",
@@ -246,11 +258,12 @@ export const ContextInputUsage = ({
246258
return null;
247259
}
248260

249-
const inputCost = modelId
250-
? getUsage({
251-
modelId,
252-
usage: { input: inputTokens, output: 0 },
253-
}).costUSD?.totalUSD
261+
const inputModel = modelId ? lookupModel(modelId) : undefined;
262+
const inputCost = inputModel
263+
? computeTokenCostsForModel({
264+
model: inputModel,
265+
usage: { input_tokens: inputTokens, output_tokens: 0 },
266+
}).totalTokenCostUSD
254267
: undefined;
255268
const inputCostText = new Intl.NumberFormat("en-US", {
256269
style: "currency",
@@ -286,11 +299,12 @@ export const ContextOutputUsage = ({
286299
return null;
287300
}
288301

289-
const outputCost = modelId
290-
? getUsage({
291-
modelId,
292-
usage: { input: 0, output: outputTokens },
293-
}).costUSD?.totalUSD
302+
const outputModel = modelId ? lookupModel(modelId) : undefined;
303+
const outputCost = outputModel
304+
? computeTokenCostsForModel({
305+
model: outputModel,
306+
usage: { input_tokens: 0, output_tokens: outputTokens },
307+
}).totalTokenCostUSD
294308
: undefined;
295309
const outputCostText = new Intl.NumberFormat("en-US", {
296310
style: "currency",
@@ -326,11 +340,12 @@ export const ContextReasoningUsage = ({
326340
return null;
327341
}
328342

329-
const reasoningCost = modelId
330-
? getUsage({
331-
modelId,
332-
usage: { reasoningTokens },
333-
}).costUSD?.totalUSD
343+
const reasoningModel = modelId ? lookupModel(modelId) : undefined;
344+
const reasoningCost = reasoningModel
345+
? computeTokenCostsForModel({
346+
model: reasoningModel,
347+
usage: { reasoning_tokens: reasoningTokens },
348+
}).totalTokenCostUSD
334349
: undefined;
335350
const reasoningCostText = new Intl.NumberFormat("en-US", {
336351
style: "currency",
@@ -366,11 +381,12 @@ export const ContextCacheUsage = ({
366381
return null;
367382
}
368383

369-
const cacheCost = modelId
370-
? getUsage({
371-
modelId,
372-
usage: { cacheReads: cacheTokens, input: 0, output: 0 },
373-
}).costUSD?.totalUSD
384+
const cacheModel = modelId ? lookupModel(modelId) : undefined;
385+
const cacheCost = cacheModel
386+
? computeTokenCostsForModel({
387+
model: cacheModel,
388+
usage: { cache_read_tokens: cacheTokens },
389+
}).totalTokenCostUSD
374390
: undefined;
375391
const cacheCostText = new Intl.NumberFormat("en-US", {
376392
style: "currency",

apps/dashboard/package.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -91,7 +91,7 @@
9191
"sonner": "^2.0.7",
9292
"streamdown": "^2.5.0",
9393
"tailwind-merge": "^3.5.0",
94-
"tokenlens": "^1.3.1",
94+
"tokenlens": "catalog:",
9595
"tw-animate-css": "^1.4.0",
9696
"use-stick-to-bottom": "^1.1.3",
9797
"vaul": "^1.1.2",

bun.lock

Lines changed: 5 additions & 34 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

package.json

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -99,7 +99,8 @@
9999
"drizzle-orm": "^0.45.2",
100100
"nanoid": "^5.1.6",
101101
"ua-parser-js": "^2.0.8",
102-
"evlog": "^2.11.1"
102+
"evlog": "^2.11.1",
103+
"tokenlens": "^2.0.0-alpha.3"
103104
}
104105
}
105106
}

packages/ai/package.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111
"@types/node": "^20.0.0",
1212
"ai": "^6.0.0",
1313
"openai": "^4.77.0",
14-
"tokenlens": "^2.0.0-alpha.3",
14+
"tokenlens": "catalog:",
1515
"typescript": "catalog:",
1616
"unbuild": "^3.6.1"
1717
},

packages/shared/package.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -52,7 +52,7 @@
5252
"drizzle-orm": "catalog:",
5353
"evlog": "catalog:",
5454
"nanoid": "catalog:",
55-
"tokenlens": "^1.3.1",
55+
"tokenlens": "catalog:",
5656
"ua-parser-js": "catalog:",
5757
"zod": "catalog:"
5858
}

0 commit comments

Comments (0)