
Commit 93a15f9

fix(api): typo in docs
1 parent: eae60d6

6 files changed: 18 additions & 36 deletions


.stats.yml

Lines changed: 2 additions & 2 deletions
@@ -1,4 +1,4 @@
 configured_endpoints: 5
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/isaacus%2Fisaacus-854d1a74fd0240b79b6a7902200adf22b85d5cb67710abe7c0177b4f8801157f.yml
-openapi_spec_hash: 9a141dbe42dfb83a674e69441888776f
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/isaacus%2Fisaacus-5033f9217ca384041df97464049b6ed07fd6d6dc47d76db59246fa67edec9ee5.yml
+openapi_spec_hash: ed015407e5771e22d0b48721f6f88e11
 config_hash: 9040e7359f066240ad536041fb2c5185

src/isaacus/resources/enrichments.py

Lines changed: 8 additions & 20 deletions
@@ -49,7 +49,7 @@ def create(
         *,
         model: Literal["kanon-2-enricher"],
         texts: Union[SequenceNotStr[str], str],
-        overflow_strategy: Optional[Literal["auto", "drop_end", "chunk"]] | Omit = omit,
+        overflow_strategy: Optional[Literal["auto", "drop_end"]] | Omit = omit,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -71,15 +71,9 @@ def create(
 
           overflow_strategy: The strategy for handling content exceeding the model's maximum input length.
 
-              `auto`, which is the recommended setting, currently behaves the same as `chunk`,
-              which intelligently breaks the input up into smaller chunks and then stitches
-              the results back together into a single prediction. In the future `auto` may
-              implement even more sophisticated strategies for handling long contexts such as
-              leveraging chunk overlap and/or a specialized stitching model.
-
-              `chunk` breaks the input up into smaller chunks that fit within the model's
-              context window and then intelligently merges the results into a single
-              prediction at the cost of a minor accuracy drop.
+              `auto` currently behaves the same as `drop_end`, dropping excess tokens from the
+              end of input. In the future, `auto` may implement more sophisticated strategies
+              such as chunking and context-aware stitching.
 
               `drop_end` drops tokens from the end of input exceeding the model's maximum
               input length.
@@ -137,7 +131,7 @@ async def create(
         *,
         model: Literal["kanon-2-enricher"],
         texts: Union[SequenceNotStr[str], str],
-        overflow_strategy: Optional[Literal["auto", "drop_end", "chunk"]] | Omit = omit,
+        overflow_strategy: Optional[Literal["auto", "drop_end"]] | Omit = omit,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
@@ -159,15 +153,9 @@ async def create(
 
           overflow_strategy: The strategy for handling content exceeding the model's maximum input length.
 
-              `auto`, which is the recommended setting, currently behaves the same as `chunk`,
-              which intelligently breaks the input up into smaller chunks and then stitches
-              the results back together into a single prediction. In the future `auto` may
-              implement even more sophisticated strategies for handling long contexts such as
-              leveraging chunk overlap and/or a specialized stitching model.
-
-              `chunk` breaks the input up into smaller chunks that fit within the model's
-              context window and then intelligently merges the results into a single
-              prediction at the cost of a minor accuracy drop.
+              `auto` currently behaves the same as `drop_end`, dropping excess tokens from the
+              end of input. In the future, `auto` may implement more sophisticated strategies
+              such as chunking and context-aware stitching.
 
               `drop_end` drops tokens from the end of input exceeding the model's maximum
               input length.
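
For context, a minimal usage sketch of the narrowed parameter (not part of this diff): the create call mirrors the updated tests below, while the `Isaacus()` client construction reading `ISAACUS_API_KEY` from the environment follows the SDK's usual pattern and is an assumption here, not something shown in this commit.

# Hypothetical usage sketch; client setup assumed, not shown in this commit.
from isaacus import Isaacus

client = Isaacus()  # assumes ISAACUS_API_KEY is set in the environment

enrichment = client.enrichments.create(
    model="kanon-2-enricher",
    texts=['1.5 You (the "User") agree to be bound by these Terms.'],
    overflow_strategy="drop_end",  # "chunk" is no longer an accepted value
)

Under the `Optional[...] | Omit = omit` signature, passing `overflow_strategy=None` (as the updated tests do) sends an explicit null, while omitting the argument leaves it off the request entirely.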

src/isaacus/types/enrichment_create_params.py

Lines changed: 4 additions & 10 deletions
@@ -25,18 +25,12 @@ class EnrichmentCreateParams(TypedDict, total=False):
     No more than 8 texts can be enriched in a single request.
     """
 
-    overflow_strategy: Optional[Literal["auto", "drop_end", "chunk"]]
+    overflow_strategy: Optional[Literal["auto", "drop_end"]]
     """The strategy for handling content exceeding the model's maximum input length.
 
-    `auto`, which is the recommended setting, currently behaves the same as `chunk`,
-    which intelligently breaks the input up into smaller chunks and then stitches
-    the results back together into a single prediction. In the future `auto` may
-    implement even more sophisticated strategies for handling long contexts such as
-    leveraging chunk overlap and/or a specialized stitching model.
-
-    `chunk` breaks the input up into smaller chunks that fit within the model's
-    context window and then intelligently merges the results into a single
-    prediction at the cost of a minor accuracy drop.
+    `auto` currently behaves the same as `drop_end`, dropping excess tokens from the
+    end of input. In the future, `auto` may implement more sophisticated strategies
+    such as chunking and context-aware stitching.
 
     `drop_end` drops tokens from the end of input exceeding the model's maximum
     input length.
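
The same narrowing from a type checker's perspective; a sketch that assumes `model` and `texts` are the TypedDict's other keys (consistent with the resource signature above) and imports the class from the module path shown in this diff:

# Hypothetical type-checking sketch for the narrowed literal.
from isaacus.types.enrichment_create_params import EnrichmentCreateParams

params: EnrichmentCreateParams = {
    "model": "kanon-2-enricher",
    "texts": ["..."],  # placeholder text
    "overflow_strategy": "drop_end",  # accepted after this change
}

# A type checker now rejects the removed value:
# params["overflow_strategy"] = "chunk"  # error: not in Literal["auto", "drop_end"]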

src/isaacus/types/enrichment_response.py

Lines changed: 1 addition & 1 deletion
@@ -20,7 +20,7 @@ class Result(BaseModel):
     document: Document
     """
     The document enriched into version 1.0.0 of the Isaacus Legal Graph Schema
-    (IGLS).
+    (ILGS).
 
     All spans in an enriched document graph are indexed into the Unicode code point
     space of a source document.

src/isaacus/types/ilgs/v1/document.py

Lines changed: 1 addition & 1 deletion
@@ -23,7 +23,7 @@
 
 class Document(BaseModel):
     """
-    The document enriched into version 1.0.0 of the Isaacus Legal Graph Schema (IGLS).
+    The document enriched into version 1.0.0 of the Isaacus Legal Graph Schema (ILGS).
 
     All spans in an enriched document graph are indexed into the Unicode code point space of a source document.
 
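
The docstring's note that spans are indexed in Unicode code points pairs naturally with Python, whose str indexing is also by code point; a small illustration follows, using a hypothetical Span stand-in since the actual span model is not shown in this diff.

# Illustration only: `Span` and its field names are hypothetical stand-ins.
from dataclasses import dataclass

@dataclass
class Span:
    start: int  # inclusive code-point offset into the source document
    end: int    # exclusive code-point offset

source = 'You (the "User") agree to be bound by these Terms.'
span = Span(start=10, end=14)  # suppose the graph marks the defined term "User"

# Python slices strings by code point, so no byte-offset conversion is needed.
assert source[span.start : span.end] == "User"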

tests/api_resources/test_enrichments.py

Lines changed: 2 additions & 2 deletions
@@ -32,7 +32,7 @@ def test_method_create_with_all_params(self, client: Isaacus) -> None:
         enrichment = client.enrichments.create(
             model="kanon-2-enricher",
             texts=['1.5 You (the "User") agree to be bound by these Terms.'],
-            overflow_strategy="auto",
+            overflow_strategy=None,
         )
         assert_matches_type(EnrichmentResponse, enrichment, path=["response"])
 
@@ -85,7 +85,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncIsaacus) -> None:
         enrichment = await async_client.enrichments.create(
             model="kanon-2-enricher",
             texts=['1.5 You (the "User") agree to be bound by these Terms.'],
-            overflow_strategy="auto",
+            overflow_strategy=None,
         )
         assert_matches_type(EnrichmentResponse, enrichment, path=["response"])
 
