Skip to content

Commit 1eeced8

Browse files
committed
🔧 修正(cli.cjs, github-action.cjs, api.ts, config.ts, generateCommitMessageFromGitDiff.ts): OCO_TOKEN_LIMITのデフォルト値を4096に設定しました
OCO_TOKEN_LIMITのデフォルト値を4096に設定しました。また、configファイルと関連する箇所でOCO_TOKEN_LIMITを使用するように変更しました。 ※gpt-4やgpt-3.5-turbo-16kの最大トークン数に対応するためにOCO_TOKEN_LIMITでトークン数の最大数を変更できるようにした 🔧 fix(cli.cjs, github-action.cjs, api.ts, config.ts, generateCommitMessageFromGitDiff.ts): Set the default value of OCO_TOKEN_LIMIT to 4096 Set the default value of OCO_TOKEN_LIMIT to 4096. Also updated the relevant parts in the config file and other files to use OCO_TOKEN_LIMIT.
1 parent 42a34ca commit 1eeced8

7 files changed

Lines changed: 86 additions & 13 deletions

File tree

README.md

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,24 @@
1919

2020
All the commits in this repo are authored by OpenCommit — look at [the commits](https://github.com/di-sukharev/opencommit/commit/eae7618d575ee8d2e9fff5de56da79d40c4bc5fc) to see how OpenCommit works. Emojis and long commit descriptions are configurable.
2121

22+
## Enhanced from the original upstream [di-sukharev/opencommit](https://github.com/di-sukharev/opencommit)
23+
24+
This repository is forked from the original [di-sukharev/opencommit](https://github.com/di-sukharev/opencommit).
25+
It enhances functionality by adopting early some pull requests I need.
26+
27+
Added features
28+
* Support [Azure OpenAI Service #167](https://github.com/di-sukharev/opencommit/pull/167)
29+
* Support [push config #220](https://github.com/di-sukharev/opencommit/pull/220)
30+
* Add translations to English commit messages
31+
* ~~Add [prefix on commit messages #160](https://github.com/di-sukharev/opencommit/pull/160)~~
32+
* The fixed token limit of 4096 can now be changed with OCO_TOKEN_LIMIT to support gpt-3.5-turbo-16k.
33+
etc.
34+
35+
You can install it with the following command.
36+
``` shell
37+
$ npm install github:takuya-o/opencommit
38+
```
39+
2240
## Setup OpenCommit as a CLI tool
2341

2442
You can use OpenCommit by simply running it via the CLI like this `oco`. 2 seconds and your staged changes are committed with a meaningful message.
@@ -172,12 +190,14 @@ You may switch to GPT-4 which performs better, but costs ~x15 times more 🤠
172190

173191
```sh
174192
oco config set OCO_MODEL=gpt-4
193+
oco config set OCO_TOKEN_LIMIT=32768
175194
```
176195

177196
or for as a cheaper option:
178197

179198
```sh
180199
oco config set OCO_MODEL=gpt-3.5-turbo
200+
oco config set OCO_TOKEN_LIMIT=16384
181201
```
182202

183203
Make sure that you spell it `gpt-4` (lowercase) and that you have API access to the 4th model. Even if you have ChatGPT+, that doesn't necessarily mean that you have API access to GPT-4.

out/cli.cjs

Lines changed: 20 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -18882,7 +18882,6 @@ function getI18nLocal(value) {
1888218882
var dotenv = __toESM(require_main(), 1);
1888318883
dotenv.config();
1888418884
var configCache = null;
18885-
var DEFAULT_MODEL_TOKEN_LIMIT = 4096;
1888618885
var validateConfig = (key, condition, validationMessage) => {
1888718886
if (!condition) {
1888818887
ce(
@@ -18941,6 +18940,22 @@ var configValidators = {
1894118940
);
1894218941
return value;
1894318942
},
18943+
["OCO_TOKEN_LIMIT" /* OCO_TOKEN_LIMIT */](value) {
18944+
if (typeof value === "string") {
18945+
value = parseInt(value);
18946+
validateConfig(
18947+
"OCO_TOKEN_LIMIT" /* OCO_TOKEN_LIMIT */,
18948+
!isNaN(value),
18949+
"Must be a number"
18950+
);
18951+
}
18952+
validateConfig(
18953+
"OCO_TOKEN_LIMIT" /* OCO_TOKEN_LIMIT */,
18954+
value ? typeof value === "number" : void 0,
18955+
"Must be a number"
18956+
);
18957+
return value;
18958+
},
1894418959
["OCO_LANGUAGE" /* OCO_LANGUAGE */](value) {
1894518960
validateConfig(
1894618961
"OCO_LANGUAGE" /* OCO_LANGUAGE */,
@@ -19013,6 +19028,7 @@ var getConfig = () => {
1901319028
OCO_DESCRIPTION: process.env.OCO_DESCRIPTION === "true" ? true : false,
1901419029
OCO_EMOJI: process.env.OCO_EMOJI === "true" ? true : false,
1901519030
OCO_MODEL: process.env.OCO_MODEL || "gpt-3.5-turbo-16k",
19031+
OCO_TOKEN_LIMIT: 4096,
1901619032
OCO_LANGUAGE: process.env.OCO_LANGUAGE || "en",
1901719033
OCO_DISABLE_GIT_PUSH: Boolean(process.env.OCO_DISABLE_GIT_PUSH),
1901819034
OCO_MESSAGE_TEMPLATE_PLACEHOLDER: process.env.OCO_MESSAGE_TEMPLATE_PLACEHOLDER
@@ -23358,6 +23374,7 @@ var maxTokens = config2?.OCO_OPENAI_MAX_TOKENS;
2335823374
var basePath = config2?.OCO_OPENAI_BASE_PATH;
2335923375
var apiKey = config2?.OCO_OPENAI_API_KEY;
2336023376
var apiType = config2?.OCO_OPENAI_API_TYPE || "openai";
23377+
var tokenLimit = config2?.OCO_TOKEN_LIMIT || 4096;
2336123378
var [command, mode] = process.argv.slice(2);
2336223379
if (!apiKey && command !== "config" && mode !== "set" /* set */) {
2336323380
ae("opencommit");
@@ -23410,7 +23427,7 @@ var OpenAi = class {
2341023427
};
2341123428
try {
2341223429
const REQUEST_TOKENS = messages.map((msg) => tokenCount(msg.content || "") + 4).reduce((a2, b5) => a2 + b5, 0);
23413-
if (REQUEST_TOKENS > DEFAULT_MODEL_TOKEN_LIMIT - maxTokens) {
23430+
if (REQUEST_TOKENS > tokenLimit - maxTokens) {
2341423431
throw new Error("TOO_MUCH_TOKENS" /* tooMuchTokens */);
2341523432
}
2341623433
const { data } = await this.openAI.createChatCompletion(params);
@@ -23525,7 +23542,7 @@ var INIT_MESSAGES_PROMPT_LENGTH = INIT_MESSAGES_PROMPT.map(
2352523542
var ADJUSTMENT_FACTOR = 20;
2352623543
var generateCommitMessageByDiff = async (diff) => {
2352723544
try {
23528-
const MAX_REQUEST_TOKENS = DEFAULT_MODEL_TOKEN_LIMIT - ADJUSTMENT_FACTOR - INIT_MESSAGES_PROMPT_LENGTH - config3?.OCO_OPENAI_MAX_TOKENS;
23545+
const MAX_REQUEST_TOKENS = (config3?.OCO_TOKEN_LIMIT || 4096) - ADJUSTMENT_FACTOR - INIT_MESSAGES_PROMPT_LENGTH - config3?.OCO_OPENAI_MAX_TOKENS;
2352923546
if (tokenCount(diff) >= MAX_REQUEST_TOKENS) {
2353023547
const commitMessagePromises = getCommitMsgsPromisesFromFileDiffs(
2353123548
diff,

out/github-action.cjs

Lines changed: 20 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -29081,7 +29081,6 @@ function getI18nLocal(value) {
2908129081
var dotenv = __toESM(require_main(), 1);
2908229082
dotenv.config();
2908329083
var configCache = null;
29084-
var DEFAULT_MODEL_TOKEN_LIMIT = 4096;
2908529084
var validateConfig = (key, condition, validationMessage) => {
2908629085
if (!condition) {
2908729086
ce(
@@ -29140,6 +29139,22 @@ var configValidators = {
2914029139
);
2914129140
return value;
2914229141
},
29142+
["OCO_TOKEN_LIMIT" /* OCO_TOKEN_LIMIT */](value) {
29143+
if (typeof value === "string") {
29144+
value = parseInt(value);
29145+
validateConfig(
29146+
"OCO_TOKEN_LIMIT" /* OCO_TOKEN_LIMIT */,
29147+
!isNaN(value),
29148+
"Must be a number"
29149+
);
29150+
}
29151+
validateConfig(
29152+
"OCO_TOKEN_LIMIT" /* OCO_TOKEN_LIMIT */,
29153+
value ? typeof value === "number" : void 0,
29154+
"Must be a number"
29155+
);
29156+
return value;
29157+
},
2914329158
["OCO_LANGUAGE" /* OCO_LANGUAGE */](value) {
2914429159
validateConfig(
2914529160
"OCO_LANGUAGE" /* OCO_LANGUAGE */,
@@ -29212,6 +29227,7 @@ var getConfig = () => {
2921229227
OCO_DESCRIPTION: process.env.OCO_DESCRIPTION === "true" ? true : false,
2921329228
OCO_EMOJI: process.env.OCO_EMOJI === "true" ? true : false,
2921429229
OCO_MODEL: process.env.OCO_MODEL || "gpt-3.5-turbo-16k",
29230+
OCO_TOKEN_LIMIT: 4096,
2921529231
OCO_LANGUAGE: process.env.OCO_LANGUAGE || "en",
2921629232
OCO_DISABLE_GIT_PUSH: Boolean(process.env.OCO_DISABLE_GIT_PUSH),
2921729233
OCO_MESSAGE_TEMPLATE_PLACEHOLDER: process.env.OCO_MESSAGE_TEMPLATE_PLACEHOLDER
@@ -30445,6 +30461,7 @@ var maxTokens = config2?.OCO_OPENAI_MAX_TOKENS;
3044530461
var basePath = config2?.OCO_OPENAI_BASE_PATH;
3044630462
var apiKey = config2?.OCO_OPENAI_API_KEY;
3044730463
var apiType = config2?.OCO_OPENAI_API_TYPE || "openai";
30464+
var tokenLimit = config2?.OCO_TOKEN_LIMIT || 4096;
3044830465
var [command, mode] = process.argv.slice(2);
3044930466
if (!apiKey && command !== "config" && mode !== "set" /* set */) {
3045030467
ae("opencommit");
@@ -30497,7 +30514,7 @@ var OpenAi = class {
3049730514
};
3049830515
try {
3049930516
const REQUEST_TOKENS = messages.map((msg) => tokenCount(msg.content || "") + 4).reduce((a2, b) => a2 + b, 0);
30500-
if (REQUEST_TOKENS > DEFAULT_MODEL_TOKEN_LIMIT - maxTokens) {
30517+
if (REQUEST_TOKENS > tokenLimit - maxTokens) {
3050130518
throw new Error("TOO_MUCH_TOKENS" /* tooMuchTokens */);
3050230519
}
3050330520
const { data } = await this.openAI.createChatCompletion(params);
@@ -30603,7 +30620,7 @@ var INIT_MESSAGES_PROMPT_LENGTH = INIT_MESSAGES_PROMPT.map(
3060330620
var ADJUSTMENT_FACTOR = 20;
3060430621
var generateCommitMessageByDiff = async (diff) => {
3060530622
try {
30606-
const MAX_REQUEST_TOKENS = DEFAULT_MODEL_TOKEN_LIMIT - ADJUSTMENT_FACTOR - INIT_MESSAGES_PROMPT_LENGTH - config3?.OCO_OPENAI_MAX_TOKENS;
30623+
const MAX_REQUEST_TOKENS = (config3?.OCO_TOKEN_LIMIT || 4096) - ADJUSTMENT_FACTOR - INIT_MESSAGES_PROMPT_LENGTH - config3?.OCO_OPENAI_MAX_TOKENS;
3060730624
if (tokenCount(diff) >= MAX_REQUEST_TOKENS) {
3060830625
const commitMessagePromises = getCommitMsgsPromisesFromFileDiffs(
3060930626
diff,

package-lock.json

Lines changed: 3 additions & 3 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

src/api.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,6 @@ import {
99

1010
import {
1111
CONFIG_MODES,
12-
DEFAULT_MODEL_TOKEN_LIMIT,
1312
getConfig
1413
} from './commands/config';
1514
import { tokenCount } from './utils/tokenCount';
@@ -22,6 +21,7 @@ let maxTokens = config?.OCO_OPENAI_MAX_TOKENS;
2221
let basePath = config?.OCO_OPENAI_BASE_PATH;
2322
let apiKey = config?.OCO_OPENAI_API_KEY;
2423
let apiType = config?.OCO_OPENAI_API_TYPE || 'openai';
24+
let tokenLimit = config?.OCO_TOKEN_LIMIT || 4096;
2525

2626
const [command, mode] = process.argv.slice(2);
2727

@@ -87,7 +87,7 @@ class OpenAi {
8787
.map((msg) => tokenCount(msg.content || '') + 4)
8888
.reduce((a, b) => a + b, 0);
8989

90-
if (REQUEST_TOKENS > DEFAULT_MODEL_TOKEN_LIMIT - maxTokens) {
90+
if (REQUEST_TOKENS > tokenLimit - maxTokens) {
9191
throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens);
9292
}
9393

src/commands/config.ts

Lines changed: 19 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -22,6 +22,7 @@ export enum CONFIG_KEYS {
2222
OCO_DESCRIPTION = 'OCO_DESCRIPTION',
2323
OCO_EMOJI = 'OCO_EMOJI',
2424
OCO_MODEL = 'OCO_MODEL',
25+
OCO_TOKEN_LIMIT = 'OCO_TOKEN_LIMIT',
2526
OCO_LANGUAGE = 'OCO_LANGUAGE',
2627
OCO_DISABLE_GIT_PUSH = 'OCO_DISABLE_GIT_PUSH',
2728
OCO_MESSAGE_TEMPLATE_PLACEHOLDER = 'OCO_MESSAGE_TEMPLATE_PLACEHOLDER'
@@ -111,6 +112,23 @@ export const configValidators = {
111112
);
112113
return value;
113114
},
115+
[CONFIG_KEYS.OCO_TOKEN_LIMIT](value: any) {
116+
// If the value is a string, convert it to a number.
117+
if (typeof value === 'string') {
118+
value = parseInt(value);
119+
validateConfig(
120+
CONFIG_KEYS.OCO_TOKEN_LIMIT,
121+
!isNaN(value),
122+
'Must be a number'
123+
);
124+
}
125+
validateConfig(
126+
CONFIG_KEYS.OCO_TOKEN_LIMIT,
127+
value ? typeof value === 'number' : undefined,
128+
'Must be a number'
129+
);
130+
return value;
131+
},
114132
[CONFIG_KEYS.OCO_LANGUAGE](value: any) {
115133
validateConfig(
116134
CONFIG_KEYS.OCO_LANGUAGE,
@@ -197,6 +215,7 @@ export const getConfig = (): ConfigType | null => {
197215
OCO_DESCRIPTION: process.env.OCO_DESCRIPTION === 'true' ? true : false,
198216
OCO_EMOJI: process.env.OCO_EMOJI === 'true' ? true : false,
199217
OCO_MODEL: process.env.OCO_MODEL || 'gpt-3.5-turbo-16k',
218+
OCO_TOKEN_LIMIT:4096,
200219
OCO_LANGUAGE: process.env.OCO_LANGUAGE || 'en',
201220
OCO_DISABLE_GIT_PUSH: Boolean(process.env.OCO_DISABLE_GIT_PUSH),
202221
OCO_MESSAGE_TEMPLATE_PLACEHOLDER: process.env.OCO_MESSAGE_TEMPLATE_PLACEHOLDER

src/generateCommitMessageFromGitDiff.ts

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@ import {
33
ChatCompletionRequestMessageRoleEnum
44
} from 'openai';
55
import { api } from './api';
6-
import { DEFAULT_MODEL_TOKEN_LIMIT, getConfig } from './commands/config';
6+
import { getConfig } from './commands/config';
77
import { mergeDiffs } from './utils/mergeDiffs';
88
import { i18n, I18nLocals } from './i18n';
99
import { tokenCount } from './utils/tokenCount';
@@ -95,7 +95,7 @@ export const generateCommitMessageByDiff = async (
9595
): Promise<string> => {
9696
try {
9797
const MAX_REQUEST_TOKENS =
98-
DEFAULT_MODEL_TOKEN_LIMIT -
98+
(config?.OCO_TOKEN_LIMIT || 4096) -
9999
ADJUSTMENT_FACTOR -
100100
INIT_MESSAGES_PROMPT_LENGTH -
101101
config?.OCO_OPENAI_MAX_TOKENS;

0 commit comments

Comments
 (0)