diff --git a/apps/ccusage/config-schema.json b/apps/ccusage/config-schema.json index 34392dde..b6868a61 100644 --- a/apps/ccusage/config-schema.json +++ b/apps/ccusage/config-schema.json @@ -73,6 +73,17 @@ "markdownDescription": "Use cached pricing data for Claude models instead of fetching from API", "default": false }, + "pricingSource": { + "type": "string", + "enum": [ + "auto", + "litellm", + "modelsdev" + ], + "description": "Pricing data source: auto (merge LiteLLM + models.dev), litellm (LiteLLM only), modelsdev (models.dev only)", + "markdownDescription": "Pricing data source: auto (merge LiteLLM + models.dev), litellm (LiteLLM only), modelsdev (models.dev only)", + "default": "auto" + }, "color": { "type": "boolean", "description": "Enable colored output (default: auto). FORCE_COLOR=1 has the same effect.", @@ -177,6 +188,17 @@ "markdownDescription": "Use cached pricing data for Claude models instead of fetching from API", "default": false }, + "pricingSource": { + "type": "string", + "enum": [ + "auto", + "litellm", + "modelsdev" + ], + "description": "Pricing data source: auto (merge LiteLLM + models.dev), litellm (LiteLLM only), modelsdev (models.dev only)", + "markdownDescription": "Pricing data source: auto (merge LiteLLM + models.dev), litellm (LiteLLM only), modelsdev (models.dev only)", + "default": "auto" + }, "color": { "type": "boolean", "description": "Enable colored output (default: auto). FORCE_COLOR=1 has the same effect.", @@ -292,6 +314,17 @@ "markdownDescription": "Use cached pricing data for Claude models instead of fetching from API", "default": false }, + "pricingSource": { + "type": "string", + "enum": [ + "auto", + "litellm", + "modelsdev" + ], + "description": "Pricing data source: auto (merge LiteLLM + models.dev), litellm (LiteLLM only), modelsdev (models.dev only)", + "markdownDescription": "Pricing data source: auto (merge LiteLLM + models.dev), litellm (LiteLLM only), modelsdev (models.dev only)", + "default": "auto" + }, "color": { "type": "boolean", "description": "Enable colored output (default: auto). FORCE_COLOR=1 has the same effect.", @@ -391,6 +424,17 @@ "markdownDescription": "Use cached pricing data for Claude models instead of fetching from API", "default": false }, + "pricingSource": { + "type": "string", + "enum": [ + "auto", + "litellm", + "modelsdev" + ], + "description": "Pricing data source: auto (merge LiteLLM + models.dev), litellm (LiteLLM only), modelsdev (models.dev only)", + "markdownDescription": "Pricing data source: auto (merge LiteLLM + models.dev), litellm (LiteLLM only), modelsdev (models.dev only)", + "default": "auto" + }, "color": { "type": "boolean", "description": "Enable colored output (default: auto). FORCE_COLOR=1 has the same effect.", @@ -495,6 +539,17 @@ "markdownDescription": "Use cached pricing data for Claude models instead of fetching from API", "default": false }, + "pricingSource": { + "type": "string", + "enum": [ + "auto", + "litellm", + "modelsdev" + ], + "description": "Pricing data source: auto (merge LiteLLM + models.dev), litellm (LiteLLM only), modelsdev (models.dev only)", + "markdownDescription": "Pricing data source: auto (merge LiteLLM + models.dev), litellm (LiteLLM only), modelsdev (models.dev only)", + "default": "auto" + }, "color": { "type": "boolean", "description": "Enable colored output (default: auto). 
FORCE_COLOR=1 has the same effect.", @@ -599,6 +654,17 @@ "markdownDescription": "Use cached pricing data for Claude models instead of fetching from API", "default": false }, + "pricingSource": { + "type": "string", + "enum": [ + "auto", + "litellm", + "modelsdev" + ], + "description": "Pricing data source: auto (merge LiteLLM + models.dev), litellm (LiteLLM only), modelsdev (models.dev only)", + "markdownDescription": "Pricing data source: auto (merge LiteLLM + models.dev), litellm (LiteLLM only), modelsdev (models.dev only)", + "default": "auto" + }, "color": { "type": "boolean", "description": "Enable colored output (default: auto). FORCE_COLOR=1 has the same effect.", @@ -666,6 +732,17 @@ "markdownDescription": "Use cached pricing data for Claude models instead of fetching from API", "default": true }, + "pricingSource": { + "type": "string", + "enum": [ + "auto", + "litellm", + "modelsdev" + ], + "description": "Pricing data source: auto (merge LiteLLM + models.dev), litellm (LiteLLM only), modelsdev (models.dev only)", + "markdownDescription": "Pricing data source: auto (merge LiteLLM + models.dev), litellm (LiteLLM only), modelsdev (models.dev only)", + "default": "auto" + }, "visualBurnRate": { "type": "string", "enum": [ diff --git a/apps/ccusage/package.json b/apps/ccusage/package.json index a1220799..9df96d03 100644 --- a/apps/ccusage/package.json +++ b/apps/ccusage/package.json @@ -1,19 +1,19 @@ { "name": "ccusage", - "type": "module", "version": "17.1.8", "description": "Usage analysis tool for Claude Code", - "author": "ryoppippi", - "license": "MIT", - "funding": "https://github.com/ryoppippi/ccusage?sponsor=1", "homepage": "https://github.com/ryoppippi/ccusage#readme", + "bugs": { + "url": "https://github.com/ryoppippi/ccusage/issues" + }, "repository": { "type": "git", "url": "git+https://github.com/ryoppippi/ccusage.git" }, - "bugs": { - "url": "https://github.com/ryoppippi/ccusage/issues" - }, + "funding": "https://github.com/ryoppippi/ccusage?sponsor=1", + "license": "MIT", + "author": "ryoppippi", + "type": "module", "exports": { ".": "./src/index.ts", "./calculate-cost": "./src/calculate-cost.ts", @@ -32,9 +32,6 @@ "config-schema.json", "dist" ], - "engines": { - "node": ">=20.19.4" - }, "scripts": { "build": "pnpm run generate:schema && tsdown", "format": "pnpm run lint --fix", @@ -90,6 +87,9 @@ "vitest": "catalog:testing", "xdg-basedir": "catalog:runtime" }, + "engines": { + "node": ">=20.19.4" + }, "publishConfig": { "bin": { "ccusage": "./dist/index.js" diff --git a/apps/ccusage/src/_live-monitor.ts b/apps/ccusage/src/_live-monitor.ts index 2c380881..bf6216a2 100644 --- a/apps/ccusage/src/_live-monitor.ts +++ b/apps/ccusage/src/_live-monitor.ts @@ -9,7 +9,7 @@ */ import type { LoadedUsageEntry, SessionBlock } from './_session-blocks.ts'; -import type { CostMode, SortOrder } from './_types.ts'; +import type { CostMode, PricingSource, SortOrder } from './_types.ts'; import { readFile, stat } from 'node:fs/promises'; import { Result } from '@praha/byethrow'; import pLimit from 'p-limit'; @@ -34,6 +34,7 @@ export type LiveMonitorConfig = { sessionDurationHours: number; mode: CostMode; order: SortOrder; + pricingSource: PricingSource; }; /** @@ -72,7 +73,7 @@ async function isRecentFile(filePath: string, cutoffTime: Date): Promise PREFETCHED_CLAUDE_PRICING, logger, providerPrefixes: CLAUDE_PROVIDER_PREFIXES, + useModelsDev, }); } } diff --git a/apps/ccusage/src/_shared-args.ts b/apps/ccusage/src/_shared-args.ts index e6ec5f27..493268b8 100644 --- 
a/apps/ccusage/src/_shared-args.ts +++ b/apps/ccusage/src/_shared-args.ts @@ -1,8 +1,13 @@ import type { Args } from 'gunshi'; -import type { CostMode, SortOrder } from './_types.ts'; +import type { CostMode, PricingSource, SortOrder } from './_types.ts'; import * as v from 'valibot'; import { DEFAULT_LOCALE } from './_consts.ts'; -import { CostModes, filterDateSchema, SortOrders } from './_types.ts'; +import { + CostModes, + filterDateSchema, + PricingSources, + SortOrders, +} from './_types.ts'; /** * Parses and validates a date argument in YYYYMMDD format @@ -39,7 +44,7 @@ export const sharedArgs = { type: 'enum', short: 'm', description: - 'Cost calculation mode: auto (use costUSD if exists, otherwise calculate), calculate (always calculate), display (always use costUSD)', + 'Cost calculation mode: auto (use costUSD if exists, otherwise calculate), calculate (always calculate), display (always use costUSD)', default: 'auto' as const satisfies CostMode, choices: CostModes, }, @@ -52,7 +57,7 @@ export const sharedArgs = { debugSamples: { type: 'number', description: - 'Number of sample discrepancies to show in debug output (default: 5)', + 'Number of sample discrepancies to show in debug output (default: 5)', default: 5, }, order: { @@ -72,21 +77,35 @@ export const sharedArgs = { type: 'boolean', negatable: true, short: 'O', - description: 'Use cached pricing data for Claude models instead of fetching from API', + description: + 'Use cached pricing data for Claude models instead of fetching from API', default: false, }, - color: { // --color and FORCE_COLOR=1 is handled by picocolors + pricingSource: { + type: 'enum', + short: 'p', + description: + 'Pricing data source: auto (merge LiteLLM + models.dev), litellm (LiteLLM only), modelsdev (models.dev only)', + default: 'auto' as const satisfies PricingSource, + choices: PricingSources, + }, + color: { + // --color and FORCE_COLOR=1 is handled by picocolors type: 'boolean', - description: 'Enable colored output (default: auto). FORCE_COLOR=1 has the same effect.', + description: + 'Enable colored output (default: auto). FORCE_COLOR=1 has the same effect.', }, - noColor: { // --no-color and NO_COLOR=1 is handled by picocolors + noColor: { + // --no-color and NO_COLOR=1 is handled by picocolors type: 'boolean', - description: 'Disable colored output (default: auto). NO_COLOR=1 has the same effect.', + description: + 'Disable colored output (default: auto). NO_COLOR=1 has the same effect.', }, timezone: { type: 'string', short: 'z', - description: 'Timezone for date grouping (e.g., UTC, America/New_York, Asia/Tokyo). Default: system timezone', + description: + 'Timezone for date grouping (e.g., UTC, America/New_York, Asia/Tokyo). 
Default: system timezone', }, locale: { type: 'string', @@ -97,7 +116,8 @@ export const sharedArgs = { jq: { type: 'string', short: 'q', - description: 'Process JSON output with jq command (requires jq binary, implies --json)', + description: + 'Process JSON output with jq command (requires jq binary, implies --json)', }, config: { type: 'string', @@ -105,7 +125,8 @@ export const sharedArgs = { }, compact: { type: 'boolean', - description: 'Force compact mode for narrow displays (better for screenshots)', + description: + 'Force compact mode for narrow displays (better for screenshots)', default: false, }, } as const satisfies Args; diff --git a/apps/ccusage/src/_types.ts b/apps/ccusage/src/_types.ts index 7b5ca0f9..be82ef91 100644 --- a/apps/ccusage/src/_types.ts +++ b/apps/ccusage/src/_types.ts @@ -140,6 +140,19 @@ export const CostModes = ['auto', 'calculate', 'display'] as const; */ export type CostMode = TupleToUnion<typeof CostModes>; +/** + * Available pricing data sources + * - auto: Use both LiteLLM and models.dev (merged, LiteLLM takes precedence) + * - litellm: Use only LiteLLM pricing data + * - modelsdev: Use only models.dev pricing data + */ +export const PricingSources = ['auto', 'litellm', 'modelsdev'] as const; + +/** + * Union type for pricing sources + */ +export type PricingSource = TupleToUnion<typeof PricingSources>; + /** * Available sort orders for data presentation */ diff --git a/apps/ccusage/src/commands/_blocks.live.ts b/apps/ccusage/src/commands/_blocks.live.ts index 0a6b78df..b7889b8e 100644 --- a/apps/ccusage/src/commands/_blocks.live.ts +++ b/apps/ccusage/src/commands/_blocks.live.ts @@ -35,6 +35,7 @@ export async function startLiveMonitoring(config: LiveMonitoringConfig): Promise<void> sessionDurationHours: config.sessionDurationHours, mode: config.mode, order: config.order, + pricingSource: config.pricingSource, }; using monitorState = createLiveMonitorState(monitorConfig); diff --git a/apps/ccusage/src/commands/blocks.ts b/apps/ccusage/src/commands/blocks.ts index 57ef1520..70d147e2 100644 --- a/apps/ccusage/src/commands/blocks.ts +++ b/apps/ccusage/src/commands/blocks.ts @@ -164,14 +164,15 @@ export const blocksCommand = define({ } let blocks = await loadSessionBlockData({ - since: ctx.values.since, - until: ctx.values.until, - mode: ctx.values.mode, - order: ctx.values.order, - offline: ctx.values.offline, + since: mergedOptions.since, + until: mergedOptions.until, + mode: mergedOptions.mode, + order: mergedOptions.order, + offline: mergedOptions.offline, + pricingSource: mergedOptions.pricingSource, sessionDurationHours: ctx.values.sessionLength, - timezone: ctx.values.timezone, - locale: ctx.values.locale, + timezone: mergedOptions.timezone, + locale: mergedOptions.locale, }); if (blocks.length === 0) { @@ -252,8 +253,9 @@ export const blocksCommand = define({ tokenLimit: parseTokenLimit(tokenLimitValue, maxTokensFromAll), refreshInterval: refreshInterval * 1000, // Convert to milliseconds sessionDurationHours: ctx.values.sessionLength, - mode: ctx.values.mode, - order: ctx.values.order, + mode: mergedOptions.mode, + order: mergedOptions.order, + pricingSource: mergedOptions.pricingSource, }); return; // Exit early, don't show table } diff --git a/apps/ccusage/src/commands/session.ts b/apps/ccusage/src/commands/session.ts index 0b19f98e..3994244d 100644 --- a/apps/ccusage/src/commands/session.ts +++ b/apps/ccusage/src/commands/session.ts @@ -61,12 +61,13 @@ export const sessionCommand = define({ // Original session listing logic const sessionData = await loadSessionData({ - since: 
ctx.values.since, - until: ctx.values.until, - mode: ctx.values.mode, - offline: ctx.values.offline, - timezone: ctx.values.timezone, - locale: ctx.values.locale, + since: mergedOptions.since, + until: mergedOptions.until, + mode: mergedOptions.mode, + offline: mergedOptions.offline, + pricingSource: mergedOptions.pricingSource, + timezone: mergedOptions.timezone, + locale: mergedOptions.locale, }); if (sessionData.length === 0) { diff --git a/apps/ccusage/src/commands/statusline.ts b/apps/ccusage/src/commands/statusline.ts index cd1a9a20..55ba38f0 100644 --- a/apps/ccusage/src/commands/statusline.ts +++ b/apps/ccusage/src/commands/statusline.ts @@ -107,6 +107,7 @@ export const statuslineCommand = define({ ...sharedArgs.offline, default: true, // Default to offline mode for faster performance }, + pricingSource: sharedArgs.pricingSource, visualBurnRate: { type: 'enum', choices: visualBurnRateChoices, @@ -433,7 +434,7 @@ export const statuslineCommand = define({ // Calculate context tokens from transcript with model-specific limits const contextInfo = await Result.pipe( Result.try({ - try: calculateContextTokens(hookData.transcript_path, hookData.model.id, mergedOptions.offline), + try: calculateContextTokens(hookData.transcript_path, hookData.model.id, mergedOptions.offline, mergedOptions.pricingSource), catch: error => error, }), Result.inspectError(error => logger.debug(`Failed to calculate context tokens: ${error instanceof Error ? error.message : String(error)}`)), diff --git a/apps/ccusage/src/data-loader.ts b/apps/ccusage/src/data-loader.ts index 4a856cab..df7de5f7 100644 --- a/apps/ccusage/src/data-loader.ts +++ b/apps/ccusage/src/data-loader.ts @@ -15,6 +15,7 @@ import type { Bucket, CostMode, ModelName, + PricingSource, SortOrder, Version, } from './_types.ts'; @@ -729,6 +730,7 @@ export type LoadOptions = { mode?: CostMode; // Cost calculation mode order?: SortOrder; // Sort order for dates offline?: boolean; // Use offline mode for pricing + pricingSource?: PricingSource; // Pricing data source (auto, litellm, modelsdev) sessionDurationHours?: number; // Session block duration in hours groupByProject?: boolean; // Group data by project instead of aggregating project?: string; // Filter to specific project name @@ -771,7 +773,7 @@ export async function loadDailyUsageData( const mode = options?.mode ?? 'auto'; // Use PricingFetcher with using statement for automatic cleanup - using fetcher = mode === 'display' ? null : new PricingFetcher(options?.offline); + using fetcher = mode === 'display' ? null : new PricingFetcher(options?.offline ?? false, options?.pricingSource ?? 'auto'); // Track processed message+request combinations for deduplication const processedHashes = new Set(); @@ -921,7 +923,7 @@ export async function loadSessionData( const mode = options?.mode ?? 'auto'; // Use PricingFetcher with using statement for automatic cleanup - using fetcher = mode === 'display' ? null : new PricingFetcher(options?.offline); + using fetcher = mode === 'display' ? null : new PricingFetcher(options?.offline ?? false, options?.pricingSource ?? 
'auto'); // Track processed message+request combinations for deduplication const processedHashes = new Set(); @@ -1095,7 +1097,7 @@ export async function loadWeeklyUsageData( */ export async function loadSessionUsageById( sessionId: string, - options?: { mode?: CostMode; offline?: boolean }, + options?: { mode?: CostMode; offline?: boolean; pricingSource?: PricingSource }, ): Promise<{ totalCost: number; entries: UsageData[] } | null> { const claudePaths = getClaudePaths(); @@ -1114,7 +1116,7 @@ export async function loadSessionUsageById( } const mode = options?.mode ?? 'auto'; - using fetcher = mode === 'display' ? null : new PricingFetcher(options?.offline); + using fetcher = mode === 'display' ? null : new PricingFetcher(options?.offline ?? false, options?.pricingSource ?? 'auto'); const entries: UsageData[] = []; let totalCost = 0; @@ -1232,7 +1234,7 @@ export async function loadBucketUsageData( * @param transcriptPath - Path to the transcript JSONL file * @returns Object with context tokens info or null if unavailable */ -export async function calculateContextTokens(transcriptPath: string, modelId?: string, offline = false): Promise<{ +export async function calculateContextTokens(transcriptPath: string, modelId?: string, offline = false, pricingSource: PricingSource = 'auto'): Promise<{ inputTokens: number; percentage: number; contextLimit: number; @@ -1276,7 +1278,7 @@ export async function calculateContextTokens(transcriptPath: string, modelId?: s // Get context limit from PricingFetcher let contextLimit = 200_000; // Fallback for when modelId is not provided if (modelId != null && modelId !== '') { - using fetcher = new PricingFetcher(offline); + using fetcher = new PricingFetcher(offline, pricingSource); const contextLimitResult = await fetcher.getModelContextLimit(modelId); if (Result.isSuccess(contextLimitResult) && contextLimitResult.value != null) { contextLimit = contextLimitResult.value; @@ -1351,7 +1353,7 @@ export async function loadSessionBlockData( const mode = options?.mode ?? 'auto'; // Use PricingFetcher with using statement for automatic cleanup - using fetcher = mode === 'display' ? null : new PricingFetcher(options?.offline); + using fetcher = mode === 'display' ? null : new PricingFetcher(options?.offline ?? false, options?.pricingSource ?? 
'auto'); // Track processed message+request combinations for deduplication const processedHashes = new Set(); @@ -3561,7 +3563,7 @@ invalid json line describe('display mode', () => { it('should return costUSD when available', async () => { - using fetcher = new PricingFetcher(); + using fetcher = new PricingFetcher(false, 'auto'); const result = await calculateCostForEntry(mockUsageData, 'display', fetcher); expect(result).toBe(0.05); }); @@ -3570,14 +3572,14 @@ invalid json line const dataWithoutCost = { ...mockUsageData }; dataWithoutCost.costUSD = undefined; - using fetcher = new PricingFetcher(); + using fetcher = new PricingFetcher(false, 'auto'); const result = await calculateCostForEntry(dataWithoutCost, 'display', fetcher); expect(result).toBe(0); }); it('should not use model pricing in display mode', async () => { // Even with model pricing available, should use costUSD - using fetcher = new PricingFetcher(); + using fetcher = new PricingFetcher(false, 'auto'); const result = await calculateCostForEntry(mockUsageData, 'display', fetcher); expect(result).toBe(0.05); }); @@ -3597,14 +3599,14 @@ invalid json line }, }; - using fetcher = new PricingFetcher(); + using fetcher = new PricingFetcher(false, 'auto'); const result = await calculateCostForEntry(testData, 'calculate', fetcher); expect(result).toBeGreaterThan(0); }); it('should ignore costUSD in calculate mode', async () => { - using fetcher = new PricingFetcher(); + using fetcher = new PricingFetcher(false, 'auto'); const dataWithHighCost = { ...mockUsageData, costUSD: 99.99 }; const result = await calculateCostForEntry( dataWithHighCost, @@ -3620,7 +3622,7 @@ invalid json line const dataWithoutModel = { ...mockUsageData }; dataWithoutModel.message.model = undefined; - using fetcher = new PricingFetcher(); + using fetcher = new PricingFetcher(false, 'auto'); const result = await calculateCostForEntry(dataWithoutModel, 'calculate', fetcher); expect(result).toBe(0); }); @@ -3631,7 +3633,7 @@ invalid json line message: { ...mockUsageData.message, model: createModelName('unknown-model') }, }; - using fetcher = new PricingFetcher(); + using fetcher = new PricingFetcher(false, 'auto'); const result = await calculateCostForEntry( dataWithUnknownModel, 'calculate', @@ -3652,7 +3654,7 @@ invalid json line }, }; - using fetcher = new PricingFetcher(); + using fetcher = new PricingFetcher(false, 'auto'); const result = await calculateCostForEntry( dataWithoutCacheTokens, 'calculate', @@ -3665,7 +3667,7 @@ invalid json line describe('auto mode', () => { it('should use costUSD when available', async () => { - using fetcher = new PricingFetcher(); + using fetcher = new PricingFetcher(false, 'auto'); const result = await calculateCostForEntry(mockUsageData, 'auto', fetcher); expect(result).toBe(0.05); }); @@ -3682,7 +3684,7 @@ invalid json line }, }; - using fetcher = new PricingFetcher(); + using fetcher = new PricingFetcher(false, 'auto'); const result = await calculateCostForEntry( dataWithoutCost, 'auto', @@ -3696,7 +3698,7 @@ invalid json line dataWithoutCostOrModel.costUSD = undefined; dataWithoutCostOrModel.message.model = undefined; - using fetcher = new PricingFetcher(); + using fetcher = new PricingFetcher(false, 'auto'); const result = await calculateCostForEntry(dataWithoutCostOrModel, 'auto', fetcher); expect(result).toBe(0); }); @@ -3705,14 +3707,14 @@ invalid json line const dataWithoutCost = { ...mockUsageData }; dataWithoutCost.costUSD = undefined; - using fetcher = new PricingFetcher(); + using fetcher = new 
PricingFetcher(false, 'auto'); const result = await calculateCostForEntry(dataWithoutCost, 'auto', fetcher); expect(result).toBe(0); }); it('should prefer costUSD over calculation even when both available', async () => { // Both costUSD and model pricing available, should use costUSD - using fetcher = new PricingFetcher(); + using fetcher = new PricingFetcher(false, 'auto'); const result = await calculateCostForEntry(mockUsageData, 'auto', fetcher); expect(result).toBe(0.05); }); @@ -3734,21 +3736,21 @@ invalid json line }; dataWithZeroTokens.costUSD = undefined; - using fetcher = new PricingFetcher(); + using fetcher = new PricingFetcher(false, 'auto'); const result = await calculateCostForEntry(dataWithZeroTokens, 'calculate', fetcher); expect(result).toBe(0); }); it('should handle costUSD of 0', async () => { const dataWithZeroCost = { ...mockUsageData, costUSD: 0 }; - using fetcher = new PricingFetcher(); + using fetcher = new PricingFetcher(false, 'auto'); const result = await calculateCostForEntry(dataWithZeroCost, 'display', fetcher); expect(result).toBe(0); }); it('should handle negative costUSD', async () => { const dataWithNegativeCost = { ...mockUsageData, costUSD: -0.01 }; - using fetcher = new PricingFetcher(); + using fetcher = new PricingFetcher(false, 'auto'); const result = await calculateCostForEntry(dataWithNegativeCost, 'display', fetcher); expect(result).toBe(-0.01); }); diff --git a/apps/ccusage/src/debug.ts b/apps/ccusage/src/debug.ts index 2572d3c8..e99b233e 100644 --- a/apps/ccusage/src/debug.ts +++ b/apps/ccusage/src/debug.ts @@ -92,7 +92,7 @@ export async function detectMismatches( }); // Use PricingFetcher with using statement for automatic cleanup - using fetcher = new PricingFetcher(); + using fetcher = new PricingFetcher(false, 'auto'); const stats: MismatchStats = { totalEntries: 0, diff --git a/apps/codex/package.json b/apps/codex/package.json index 7816ebcb..ee1cbdfe 100644 --- a/apps/codex/package.json +++ b/apps/codex/package.json @@ -1,19 +1,19 @@ { "name": "@ccusage/codex", - "type": "module", "version": "17.1.8", "description": "Usage analysis tool for OpenAI Codex sessions", - "author": "ryoppippi", - "license": "MIT", - "funding": "https://github.com/ryoppippi/ccusage?sponsor=1", "homepage": "https://github.com/ryoppippi/ccusage#readme", + "bugs": { + "url": "https://github.com/ryoppippi/ccusage/issues" + }, "repository": { "type": "git", "url": "git+https://github.com/ryoppippi/ccusage.git" }, - "bugs": { - "url": "https://github.com/ryoppippi/ccusage/issues" - }, + "funding": "https://github.com/ryoppippi/ccusage?sponsor=1", + "license": "MIT", + "author": "ryoppippi", + "type": "module", "main": "./dist/index.js", "module": "./dist/index.js", "bin": { @@ -22,9 +22,6 @@ "files": [ "dist" ], - "engines": { - "node": ">=20.19.4" - }, "scripts": { "build": "tsdown", "format": "pnpm run lint --fix", @@ -55,6 +52,9 @@ "valibot": "catalog:runtime", "vitest": "catalog:testing" }, + "engines": { + "node": ">=20.19.4" + }, "publishConfig": { "bin": { "ccusage-codex": "./dist/index.js" diff --git a/apps/mcp/package.json b/apps/mcp/package.json index c0c58295..ad559464 100644 --- a/apps/mcp/package.json +++ b/apps/mcp/package.json @@ -1,19 +1,19 @@ { "name": "@ccusage/mcp", - "type": "module", "version": "17.1.8", "description": "MCP server implementation for ccusage data", - "author": "ryoppippi", - "license": "MIT", - "funding": "https://github.com/ryoppippi/ccusage?sponsor=1", "homepage": "https://github.com/ryoppippi/ccusage#readme", + "bugs": { + "url": 
"https://github.com/ryoppippi/ccusage/issues" + }, "repository": { "type": "git", "url": "git+https://github.com/ryoppippi/ccusage.git" }, - "bugs": { - "url": "https://github.com/ryoppippi/ccusage/issues" - }, + "funding": "https://github.com/ryoppippi/ccusage?sponsor=1", + "license": "MIT", + "author": "ryoppippi", + "type": "module", "exports": { ".": "./src/index.ts", "./package.json": "./package.json" @@ -28,9 +28,6 @@ "README.md", "dist" ], - "engines": { - "node": ">=20.19.4" - }, "scripts": { "build": "tsdown", "dev": "bun -b --watch ./src/index.ts", @@ -65,6 +62,9 @@ "tsdown": "catalog:build", "vitest": "catalog:testing" }, + "engines": { + "node": ">=20.19.4" + }, "publishConfig": { "bin": { "ccusage-mcp": "./dist/index.js" diff --git a/docs/guide/cli-options.md b/docs/guide/cli-options.md index 4e7b7a9f..493dabb4 100644 --- a/docs/guide/cli-options.md +++ b/docs/guide/cli-options.md @@ -75,6 +75,28 @@ ccusage daily --offline ccusage daily -O ``` +### Pricing Source + +Choose the pricing data source: + +```bash +# Auto mode (default) - merge LiteLLM and models.dev +ccusage daily --pricing-source auto +ccusage daily -p auto + +# Use LiteLLM only +ccusage daily --pricing-source litellm +ccusage daily -p litellm + +# Use models.dev only +ccusage daily --pricing-source modelsdev +ccusage daily -p modelsdev +``` + +::: tip +The `auto` mode provides the most comprehensive model coverage by merging data from both LiteLLM and models.dev. See the [Pricing Sources guide](/guide/pricing-sources) for more details. +::: + ### Timezone Set the timezone for date calculations: diff --git a/docs/guide/configuration.md b/docs/guide/configuration.md index ff617c30..722039b2 100644 --- a/docs/guide/configuration.md +++ b/docs/guide/configuration.md @@ -83,7 +83,8 @@ For individual developers working on multiple projects: "$schema": "https://ccusage.com/config-schema.json", "defaults": { "breakdown": true, - "timezone": "local" + "timezone": "local", + "pricingSource": "auto" }, "commands": { "daily": { @@ -139,13 +140,16 @@ ccusage daily Control how costs are calculated: - **Mode**: `auto` (default), `calculate`, or `display` +- **Pricing Source**: `auto` (default), `litellm`, or `modelsdev` - **Offline**: Use cached pricing data - **Breakdown**: Show per-model costs ```bash -ccusage daily --mode calculate --breakdown --offline +ccusage daily --mode calculate --pricing-source auto --breakdown --offline ``` +Learn more about [pricing sources](/guide/pricing-sources). + ### Date and Time Customize date/time handling: diff --git a/docs/guide/pricing-sources.md b/docs/guide/pricing-sources.md new file mode 100644 index 00000000..aa138941 --- /dev/null +++ b/docs/guide/pricing-sources.md @@ -0,0 +1,188 @@ +# Pricing Data Sources + +ccusage supports multiple pricing data sources for accurate cost calculations. You can choose between LiteLLM, models.dev, or use both sources combined. 
+ +## Quick Start + +### Command Line + +Use the `--pricing-source` or `-p` option to select your pricing data source: + +```bash +# Default mode (auto-merge LiteLLM and models.dev) +ccusage daily + +# Explicitly specify auto mode +ccusage daily --pricing-source auto +ccusage daily -p auto + +# Use LiteLLM only +ccusage daily --pricing-source litellm +ccusage daily -p litellm + +# Use models.dev only +ccusage daily --pricing-source modelsdev +ccusage daily -p modelsdev +``` + +### Configuration File + +Configure defaults in `.ccusage/ccusage.json` or `~/.config/claude/ccusage.json`: + +```json +{ + "defaults": { + "pricingSource": "auto" + }, + "commands": { + "daily": { + "pricingSource": "litellm" + }, + "monthly": { + "pricingSource": "modelsdev" + } + } +} +``` + +## Data Sources + +### `auto` (Default) + +- Merges pricing data from LiteLLM and models.dev +- LiteLLM data takes precedence when a model exists in both sources +- Provides the most comprehensive model coverage +- **Recommended for most users** + +### `litellm` + +- Uses LiteLLM pricing data only +- Source: [github.com/BerriAI/litellm](https://github.com/BerriAI/litellm) +- Best for maintaining consistency with official LiteLLM pricing +- Includes major AI providers (Anthropic, OpenAI, Google, etc.) + +### `modelsdev` + +- Uses models.dev pricing data only +- Source: [models.dev/api.json](https://models.dev/api.json) +- Includes additional providers (Moonshot AI, LucidQuery, etc.) +- Best for using non-mainstream or regional models + +## Supported Commands + +All commands that calculate costs support the `--pricing-source` option: + +- `ccusage daily` +- `ccusage weekly` +- `ccusage monthly` +- `ccusage session` +- `ccusage blocks` +- `ccusage blocks --live` + +## Configuration Priority + +Settings are applied in this order (highest to lowest priority): + +1. Command-line argument (`--pricing-source`) +2. Command-specific config in config file +3. `defaults` section in config file +4. Default value: `auto` + +## Offline Mode + +When using offline mode (`--offline`), the `--pricing-source` setting is ignored and only pre-cached Claude model pricing data is used. + +```bash +# In offline mode, --pricing-source is ignored +ccusage daily --offline --pricing-source modelsdev +# Actual behavior: Uses pre-cached LiteLLM Claude data +``` + +## Technical Details + +### Price Format Conversion + +models.dev uses "per million tokens" pricing, which ccusage automatically converts to "per token": + +``` +models.dev: $3 per million tokens +Converted: $0.000003 per token +``` + +### Model Matching + +Different data sources use different naming conventions: + +- LiteLLM: `anthropic/claude-sonnet-4-20250514` +- models.dev: `claude-sonnet-4-5` + +ccusage automatically handles model name matching and prefix normalization. + +## Examples + +### Comparing Costs Across Sources + +```bash +# View costs using LiteLLM data +ccusage daily --pricing-source litellm + +# View costs using models.dev data +ccusage daily --pricing-source modelsdev + +# View costs using merged data (recommended) +ccusage daily --pricing-source auto +``` + +### Per-Command Source Configuration + +```json +{ + "defaults": { + "pricingSource": "auto" + }, + "commands": { + "blocks": { + "pricingSource": "litellm", + "live": true + }, + "daily": { + "pricingSource": "modelsdev" + } + } +} +``` + +## Troubleshooting + +### Pricing Data Fetch Failures + +If a data source fails to fetch, ccusage automatically falls back: + +1. 
`auto` mode: If models.dev fails, still uses LiteLLM data +2. `litellm` mode: If LiteLLM fails, attempts to use offline cache +3. `modelsdev` mode: If models.dev fails, returns an error + +### Viewing Logs + +Use the `LOG_LEVEL` environment variable to see detailed pricing data loading logs: + +```bash +LOG_LEVEL=3 ccusage daily --pricing-source auto +``` + +This will display: +``` +ℹ Fetching latest model pricing from LiteLLM... +ℹ Loaded pricing for 150 models +ℹ Fetching pricing data from models.dev... +ℹ Loaded pricing for 200 models from models.dev +ℹ Merged pricing data: 300 total models (LiteLLM: 150, models.dev: 200) +``` + +## Related Documentation + +- [CLI Options](/guide/cli-options) - All available command-line options +- [Configuration Files](/guide/config-files) - Configuration file format +- [Cost Modes](/guide/cost-modes) - Cost calculation modes +- [Environment Variables](/guide/environment-variables) - Environment configuration + diff --git a/packages/internal/package.json b/packages/internal/package.json index 6b218a5c..e5a74acc 100644 --- a/packages/internal/package.json +++ b/packages/internal/package.json @@ -1,31 +1,32 @@ { - "name": "@ccusage/internal", - "type": "module", - "version": "17.1.8", - "private": true, - "description": "Shared internal utilities for ccusage toolchain", - "exports": { - "./pricing": "./src/pricing.ts", - "./pricing-fetch-utils": "./src/pricing-fetch-utils.ts", - "./logger": "./src/logger.ts", - "./format": "./src/format.ts", - "./constants": "./src/constants.ts" - }, - "scripts": { - "format": "pnpm run lint --fix", - "lint": "eslint --cache .", - "test": "TZ=UTC vitest", - "typecheck": "tsgo --noEmit" - }, - "dependencies": { - "@praha/byethrow": "catalog:runtime", - "consola": "catalog:runtime", - "valibot": "catalog:runtime" - }, - "devDependencies": { - "@ryoppippi/eslint-config": "catalog:lint", - "eslint": "catalog:lint", - "fs-fixture": "catalog:testing", - "vitest": "catalog:testing" - } + "name": "@ccusage/internal", + "type": "module", + "version": "17.1.8", + "private": true, + "description": "Shared internal utilities for ccusage toolchain", + "exports": { + "./pricing": "./src/pricing.ts", + "./pricing-fetch-utils": "./src/pricing-fetch-utils.ts", + "./models-dev-pricing": "./src/models-dev-pricing.ts", + "./logger": "./src/logger.ts", + "./format": "./src/format.ts", + "./constants": "./src/constants.ts" + }, + "scripts": { + "format": "pnpm run lint --fix", + "lint": "eslint --cache .", + "test": "TZ=UTC vitest", + "typecheck": "tsgo --noEmit" + }, + "dependencies": { + "@praha/byethrow": "catalog:runtime", + "consola": "catalog:runtime", + "valibot": "catalog:runtime" + }, + "devDependencies": { + "@ryoppippi/eslint-config": "catalog:lint", + "eslint": "catalog:lint", + "fs-fixture": "catalog:testing", + "vitest": "catalog:testing" + } } diff --git a/packages/internal/src/models-dev-pricing.ts b/packages/internal/src/models-dev-pricing.ts new file mode 100644 index 00000000..5306448f --- /dev/null +++ b/packages/internal/src/models-dev-pricing.ts @@ -0,0 +1,181 @@ +import type { LiteLLMModelPricing } from "./pricing.ts"; +import { Result } from "@praha/byethrow"; +import * as v from "valibot"; + +export const MODELS_DEV_API_URL = "https://models.dev/api.json"; + +/** + * models.dev Model Cost Schema + * Prices are in USD per million tokens + */ +const modelsDevCostSchema = v.object({ + input: v.optional(v.number()), + output: v.optional(v.number()), + cache_read: v.optional(v.number()), + cache_write: 
v.optional(v.number()), +}); + +/** + * models.dev Model Limit Schema + */ +const modelsDevLimitSchema = v.object({ + context: v.optional(v.number()), + output: v.optional(v.number()), +}); + +/** + * models.dev Model Schema + */ +const modelsDevModelSchema = v.object({ + id: v.string(), + name: v.optional(v.string()), + cost: v.optional(modelsDevCostSchema), + limit: v.optional(modelsDevLimitSchema), +}); + +/** + * models.dev Provider Schema + */ +const modelsDevProviderSchema = v.object({ + id: v.string(), + models: v.optional(v.record(v.string(), modelsDevModelSchema)), +}); + +/** + * models.dev API Response Schema + */ +const modelsDevApiSchema = v.record(v.string(), modelsDevProviderSchema); + +export type ModelsDevModel = v.InferOutput<typeof modelsDevModelSchema>; +export type ModelsDevProvider = v.InferOutput<typeof modelsDevProviderSchema>; +export type ModelsDevApiResponse = v.InferOutput<typeof modelsDevApiSchema>; + +/** + * Convert models.dev pricing to LiteLLM format + * models.dev uses cost per million tokens, LiteLLM uses cost per token + */ +export function convertModelsDevToLiteLLM( + model: ModelsDevModel, +): LiteLLMModelPricing { + const cost = model.cost; + const limit = model.limit; + + return { + // Convert from per-million to per-token by dividing by 1,000,000 + input_cost_per_token: cost?.input != null ? cost.input / 1_000_000 : undefined, + output_cost_per_token: cost?.output != null ? cost.output / 1_000_000 : undefined, + cache_read_input_token_cost: cost?.cache_read != null ? cost.cache_read / 1_000_000 : undefined, + cache_creation_input_token_cost: cost?.cache_write != null ? cost.cache_write / 1_000_000 : undefined, + max_input_tokens: limit?.context, + max_tokens: limit?.context, + max_output_tokens: limit?.output, + }; +} + +/** + * Fetch and parse pricing data from models.dev API + */ +export async function fetchModelsDevPricing(): Result.ResultAsync<Map<string, LiteLLMModelPricing>, Error> { + return Result.pipe( + Result.try({ + try: fetch(MODELS_DEV_API_URL), + catch: error => new Error("Failed to fetch pricing from models.dev", { cause: error }), + }), + Result.andThrough((response) => { + if (!response.ok) { + return Result.fail(new Error(`Failed to fetch models.dev pricing: ${response.statusText}`)); + } + return Result.succeed(); + }), + Result.andThen(async response => Result.try({ + try: response.json() as Promise<Record<string, unknown>>, + catch: error => new Error("Failed to parse models.dev response", { cause: error }), + })), + Result.andThen((data) => { + const parsed = v.safeParse(modelsDevApiSchema, data); + if (!parsed.success) { + return Result.fail(new Error("Invalid models.dev API response format")); + } + return Result.succeed(parsed.output); + }), + Result.map((apiResponse) => { + const pricing = new Map<string, LiteLLMModelPricing>(); + + for (const [providerId, provider] of Object.entries(apiResponse)) { + if (provider.models == null) { + continue; + } + + for (const [modelId, model] of Object.entries(provider.models)) { + // Add model with provider prefix (e.g., "anthropic/claude-sonnet-4-5") + const providerPrefixedKey = `${providerId}/${modelId}`; + pricing.set(providerPrefixedKey, convertModelsDevToLiteLLM(model)); + + // Also add without provider prefix for easier matching + pricing.set(modelId, convertModelsDevToLiteLLM(model)); + } + } + + return pricing; + }), + ); +} + +if (import.meta.vitest != null) { + describe("models.dev pricing utilities", () => { + it("converts models.dev pricing to LiteLLM format", () => { + const modelsDevModel: ModelsDevModel = { + id: "claude-sonnet-4-5", + name: "Claude Sonnet 4.5", + cost: { + input: 3, + output: 15, + cache_read: 0.3, + cache_write: 3.75, 
}, + limit: { + context: 200_000, + output: 64_000, + }, + }; + + const liteLLMPricing = convertModelsDevToLiteLLM(modelsDevModel); + + expect(liteLLMPricing.input_cost_per_token).toBeCloseTo(3 / 1_000_000); + expect(liteLLMPricing.output_cost_per_token).toBeCloseTo(15 / 1_000_000); + expect(liteLLMPricing.cache_read_input_token_cost).toBeCloseTo(0.3 / 1_000_000); + expect(liteLLMPricing.cache_creation_input_token_cost).toBeCloseTo(3.75 / 1_000_000); + expect(liteLLMPricing.max_input_tokens).toBe(200_000); + expect(liteLLMPricing.max_output_tokens).toBe(64_000); + }); + + it("handles missing cost fields gracefully", () => { + const modelsDevModel: ModelsDevModel = { + id: "test-model", + cost: { + input: 1, + // output, cache_read, cache_write missing + }, + }; + + const liteLLMPricing = convertModelsDevToLiteLLM(modelsDevModel); + + expect(liteLLMPricing.input_cost_per_token).toBeCloseTo(1 / 1_000_000); + expect(liteLLMPricing.output_cost_per_token).toBeUndefined(); + expect(liteLLMPricing.cache_read_input_token_cost).toBeUndefined(); + expect(liteLLMPricing.cache_creation_input_token_cost).toBeUndefined(); + }); + + it("handles missing limit fields gracefully", () => { + const modelsDevModel: ModelsDevModel = { + id: "test-model", + }; + + const liteLLMPricing = convertModelsDevToLiteLLM(modelsDevModel); + + expect(liteLLMPricing.max_input_tokens).toBeUndefined(); + expect(liteLLMPricing.max_output_tokens).toBeUndefined(); + }); + }); +} + diff --git a/packages/internal/src/pricing.ts b/packages/internal/src/pricing.ts index ef08a151..3f710be7 100644 --- a/packages/internal/src/pricing.ts +++ b/packages/internal/src/pricing.ts @@ -1,5 +1,6 @@ import { Result } from '@praha/byethrow'; import * as v from 'valibot'; +import { fetchModelsDevPricing } from './models-dev-pricing.ts'; export const LITELLM_PRICING_URL = 'https://raw.githubusercontent.com/BerriAI/litellm/main/model_prices_and_context_window.json'; @@ -61,6 +62,12 @@ export type LiteLLMPricingFetcherOptions = { offlineLoader?: () => Promise>; url?: string; providerPrefixes?: string[]; + /** + * Enable models.dev as an additional pricing data source + * When enabled, pricing data from models.dev will be merged with LiteLLM data + * @default false + */ + useModelsDev?: boolean; }; const DEFAULT_PROVIDER_PREFIXES = [ @@ -93,6 +100,7 @@ export class LiteLLMPricingFetcher implements Disposable { private readonly offlineLoader?: () => Promise>; private readonly url: string; private readonly providerPrefixes: string[]; + private readonly useModelsDev: boolean; constructor(options: LiteLLMPricingFetcherOptions = {}) { this.logger = createLogger(options.logger); @@ -100,6 +108,7 @@ export class LiteLLMPricingFetcher implements Disposable { this.offlineLoader = options.offlineLoader; this.url = options.url ?? LITELLM_PRICING_URL; this.providerPrefixes = options.providerPrefixes ?? 
DEFAULT_PROVIDER_PREFIXES; + this.useModelsDev = Boolean(options.useModelsDev); } [Symbol.dispose](): void { @@ -138,6 +147,33 @@ export class LiteLLMPricingFetcher implements Disposable { ); } + private fetchModelsDevPricing = Result.try({ + try: async () => { + this.logger.info('Fetching pricing data from models.dev...'); + const result = await fetchModelsDevPricing(); + if (Result.isFailure(result)) { + throw result.error; + } + this.logger.info(`Loaded pricing for ${result.value.size} models from models.dev`); + return result.value; + }, + catch: error => new Error('Failed to fetch pricing from models.dev', { cause: error }), + }); + + private mergePricingMaps( + base: Map<string, LiteLLMModelPricing>, + additional: Map<string, LiteLLMModelPricing>, + ): Map<string, LiteLLMModelPricing> { + const merged = new Map(base); + for (const [key, value] of additional) { + // Only add if not already present (LiteLLM data takes precedence) + if (!merged.has(key)) { + merged.set(key, value); + } + } + return merged; + } + private async ensurePricingLoaded(): Result.ResultAsync<Map<string, LiteLLMModelPricing>, Error> { return Result.pipe( this.cachedPricing != null ? Result.succeed(this.cachedPricing) : Result.fail(new Error('Cached pricing not available')), @@ -178,6 +214,21 @@ } return pricing; }), + Result.andThen(async (liteLLMPricing) => { + if (!this.useModelsDev) { + return Result.succeed(liteLLMPricing); + } + + const modelsDevResult = await this.fetchModelsDevPricing(); + if (Result.isFailure(modelsDevResult)) { + this.logger.warn('Failed to fetch models.dev pricing, using only LiteLLM data'); + return Result.succeed(liteLLMPricing); + } + + const merged = this.mergePricingMaps(liteLLMPricing, modelsDevResult.value); + this.logger.info(`Merged pricing data: ${merged.size} total models (LiteLLM: ${liteLLMPricing.size}, models.dev: ${modelsDevResult.value.size})`); + return Result.succeed(merged); + }), Result.inspect((pricing) => { this.cachedPricing = pricing; this.logger.info(`Loaded pricing for ${pricing.size} models`);
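
For anyone wanting to exercise the new option outside the test suite, here is a minimal sketch (not part of the diff). It follows the two-argument `new PricingFetcher(false, 'auto')` pattern used in the updated tests and the `getModelContextLimit` call shown in `data-loader.ts`; the import path and the script wrapper are assumptions, not code from this PR.

```ts
// Sketch only: the PricingFetcher import path is an assumption based on the app layout above.
import { Result } from '@praha/byethrow';
import { PricingFetcher } from './_pricing-fetcher.ts';

async function main(): Promise<void> {
	// First argument: the existing offline flag; second argument: the new
	// PricingSource ('auto' | 'litellm' | 'modelsdev') introduced by this change.
	using fetcher = new PricingFetcher(false, 'modelsdev');

	// Same call pattern as calculateContextTokens() in data-loader.ts:
	// resolve a model's context window, here from models.dev data only.
	const contextLimit = await fetcher.getModelContextLimit('claude-sonnet-4-5');
	if (Result.isSuccess(contextLimit) && contextLimit.value != null) {
		console.log(`context limit: ${contextLimit.value} tokens`);
	}
}

await main();
```

Internally, the loaders forward the same value via `options?.pricingSource ?? 'auto'`, so omitting the option keeps the merged LiteLLM + models.dev behavior described in the docs above.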