Skip to content

Commit 1dfc52c

Browse files
fix(api): typo in docs
1 parent 93a15f9 commit 1dfc52c

4 files changed

Lines changed: 34 additions & 16 deletions

File tree

.stats.yml

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
configured_endpoints: 5
2-
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/isaacus%2Fisaacus-5033f9217ca384041df97464049b6ed07fd6d6dc47d76db59246fa67edec9ee5.yml
3-
openapi_spec_hash: ed015407e5771e22d0b48721f6f88e11
2+
openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/isaacus%2Fisaacus-84e454eb280ff5e816abbb592ea590979e180df5c49af8cbe9bec81efb847f81.yml
3+
openapi_spec_hash: 9eaef1b127c6dc6ff23ea1367d42459a
44
config_hash: 9040e7359f066240ad536041fb2c5185

src/isaacus/resources/enrichments.py

Lines changed: 20 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -49,7 +49,7 @@ def create(
4949
*,
5050
model: Literal["kanon-2-enricher"],
5151
texts: Union[SequenceNotStr[str], str],
52-
overflow_strategy: Optional[Literal["auto", "drop_end"]] | Omit = omit,
52+
overflow_strategy: Optional[Literal["auto", "drop_end", "chunk"]] | Omit = omit,
5353
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
5454
# The extra values given here take precedence over values defined on the client or passed to this method.
5555
extra_headers: Headers | None = None,
@@ -71,9 +71,15 @@ def create(
7171
7272
overflow_strategy: The strategy for handling content exceeding the model's maximum input length.
7373
74-
`auto` currently behaves the same as `drop_end`, dropping excess tokens from the
75-
end of input. In the future, `auto` may implement more sophisticated strategies
76-
such as chunking and context-aware stitching.
74+
`auto`, which is the recommended setting, currently behaves the same as `chunk`,
75+
which intelligently breaks the input up into smaller chunks and then stitches
76+
the results back together into a single prediction. In the future, `auto` may
77+
implement even more sophisticated strategies for handling long contexts such as
78+
leveraging chunk overlap and/or a specialized stitching model.
79+
80+
`chunk` breaks the input up into smaller chunks that fit within the model's
81+
context window and then intelligently merges the results into a single
82+
prediction at the cost of a minor accuracy drop.
7783
7884
`drop_end` drops tokens from the end of input exceeding the model's maximum
7985
input length.
@@ -131,7 +137,7 @@ async def create(
131137
*,
132138
model: Literal["kanon-2-enricher"],
133139
texts: Union[SequenceNotStr[str], str],
134-
overflow_strategy: Optional[Literal["auto", "drop_end"]] | Omit = omit,
140+
overflow_strategy: Optional[Literal["auto", "drop_end", "chunk"]] | Omit = omit,
135141
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
136142
# The extra values given here take precedence over values defined on the client or passed to this method.
137143
extra_headers: Headers | None = None,
@@ -153,9 +159,15 @@ async def create(
153159
154160
overflow_strategy: The strategy for handling content exceeding the model's maximum input length.
155161
156-
`auto` currently behaves the same as `drop_end`, dropping excess tokens from the
157-
end of input. In the future, `auto` may implement more sophisticated strategies
158-
such as chunking and context-aware stitching.
162+
`auto`, which is the recommended setting, currently behaves the same as `chunk`,
163+
which intelligently breaks the input up into smaller chunks and then stitches
164+
the results back together into a single prediction. In the future, `auto` may
165+
implement even more sophisticated strategies for handling long contexts such as
166+
leveraging chunk overlap and/or a specialized stitching model.
167+
168+
`chunk` breaks the input up into smaller chunks that fit within the model's
169+
context window and then intelligently merges the results into a single
170+
prediction at the cost of a minor accuracy drop.
159171
160172
`drop_end` drops tokens from the end of input exceeding the model's maximum
161173
input length.

src/isaacus/types/enrichment_create_params.py

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -25,12 +25,18 @@ class EnrichmentCreateParams(TypedDict, total=False):
2525
No more than 8 texts can be enriched in a single request.
2626
"""
2727

28-
overflow_strategy: Optional[Literal["auto", "drop_end"]]
28+
overflow_strategy: Optional[Literal["auto", "drop_end", "chunk"]]
2929
"""The strategy for handling content exceeding the model's maximum input length.
3030
31-
`auto` currently behaves the same as `drop_end`, dropping excess tokens from the
32-
end of input. In the future, `auto` may implement more sophisticated strategies
33-
such as chunking and context-aware stitching.
31+
`auto`, which is the recommended setting, currently behaves the same as `chunk`,
32+
which intelligently breaks the input up into smaller chunks and then stitches
33+
the results back together into a single prediction. In the future, `auto` may
34+
implement even more sophisticated strategies for handling long contexts such as
35+
leveraging chunk overlap and/or a specialized stitching model.
36+
37+
`chunk` breaks the input up into smaller chunks that fit within the model's
38+
context window and then intelligently merges the results into a single
39+
prediction at the cost of a minor accuracy drop.
3440
3541
`drop_end` drops tokens from the end of input exceeding the model's maximum
3642
input length.

tests/api_resources/test_enrichments.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -32,7 +32,7 @@ def test_method_create_with_all_params(self, client: Isaacus) -> None:
3232
enrichment = client.enrichments.create(
3333
model="kanon-2-enricher",
3434
texts=['1.5 You (the "User") agree to be bound by these Terms.'],
35-
overflow_strategy=None,
35+
overflow_strategy="auto",
3636
)
3737
assert_matches_type(EnrichmentResponse, enrichment, path=["response"])
3838

@@ -85,7 +85,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncIsaacus) -
8585
enrichment = await async_client.enrichments.create(
8686
model="kanon-2-enricher",
8787
texts=['1.5 You (the "User") agree to be bound by these Terms.'],
88-
overflow_strategy=None,
88+
overflow_strategy="auto",
8989
)
9090
assert_matches_type(EnrichmentResponse, enrichment, path=["response"])
9191

0 commit comments

Comments
 (0)