Skip to content

Commit f8bf3b5

Browse files
Switch the remaining models to the new InferenceSession
1 parent 6932252 commit f8bf3b5

4 files changed

Lines changed: 29 additions & 29 deletions

File tree

examples/pipelines/text-generation.php

Lines changed: 26 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -13,35 +13,35 @@
1313
//
1414
//$generator = pipeline('text-generation', 'Xenova/gpt2');
1515
//$generator = pipeline('text-generation', 'Xenova/Qwen1.5-0.5B-Chat');
16-
$generator = pipeline('text-generation', 'Xenova/TinyLlama-1.1B-Chat-v1.0');
17-
18-
$streamer = StdOutStreamer::make();
19-
20-
$messages = [
21-
['role' => 'system', 'content' => 'You are a helpful assistant.'],
22-
['role' => 'user', 'content' => 'What is diffusion in chemistry?'],
23-
];
16+
//$generator = pipeline('text-generation', 'Xenova/TinyLlama-1.1B-Chat-v1.0');
17+
//
18+
//$streamer = StdOutStreamer::make();
19+
//
20+
//$messages = [
21+
// ['role' => 'system', 'content' => 'You are a helpful assistant.'],
22+
// ['role' => 'user', 'content' => 'What is diffusion in chemistry?'],
23+
//];
24+
//
25+
//$input = $generator->tokenizer->applyChatTemplate($messages, addGenerationPrompt: true, tokenize: false);
26+
//
27+
//$output = $generator($input,
28+
// streamer: $streamer,
29+
// maxNewTokens: 128,
30+
// doSample: true,
31+
// returnFullText: false,
32+
//// temperature: 0.7,
33+
//// repetitionPenalty: 1.3,
34+
//// earlyStopping: true
35+
//);
2436

25-
$input = $generator->tokenizer->applyChatTemplate($messages, addGenerationPrompt: true, tokenize: false);
37+
$generator = pipeline('text-generation', 'Xenova/codegen-350M-mono');
38+
$streamer = StdOutStreamer::make($generator->tokenizer);
2639

27-
$output = $generator($input,
40+
$output = $generator(
41+
'def fib(n):',
2842
streamer: $streamer,
29-
maxNewTokens: 128,
30-
doSample: true,
31-
returnFullText: false,
32-
// temperature: 0.7,
33-
// repetitionPenalty: 1.3,
34-
// earlyStopping: true
43+
maxNewTokens: 100,
44+
doSample: true
3545
);
3646

37-
//$generator = pipeline('text-generation', 'Xenova/codegen-350M-mono');
38-
//$streamer = StdOutStreamer::make($generator->tokenizer);
39-
//
40-
//$output = $generator(
41-
// 'def fib(n):',
42-
// streamer: $streamer,
43-
// maxNewTokens: 100,
44-
// doSample: true
45-
//);
46-
4747
dd($output[0]['generated_text'], timeUsage(), memoryUsage());

src/Models/Pretrained/CodeGenPretrainedModel.php

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77

88
use Codewithkyrian\Transformers\Models\ModelArchitecture;
99
use Codewithkyrian\Transformers\Utils\AutoConfig;
10-
use OnnxRuntime\InferenceSession;
10+
use Codewithkyrian\Transformers\Utils\InferenceSession;
1111

1212
class CodeGenPretrainedModel extends PretrainedModel
1313
{

src/Models/Pretrained/GPTBigCodePretrainedModel.php

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -8,7 +8,7 @@
88
use Codewithkyrian\Transformers\Models\ModelArchitecture;
99
use Codewithkyrian\Transformers\Utils\AutoConfig;
1010
use Codewithkyrian\Transformers\Utils\GenerationConfig;
11-
use OnnxRuntime\InferenceSession;
11+
use Codewithkyrian\Transformers\Utils\InferenceSession;
1212

1313
class GPTBigCodePretrainedModel extends PretrainedModel
1414
{

src/Models/Pretrained/GPTJPretrainedModel.php

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -7,7 +7,7 @@
77

88
use Codewithkyrian\Transformers\Models\ModelArchitecture;
99
use Codewithkyrian\Transformers\Utils\AutoConfig;
10-
use OnnxRuntime\InferenceSession;
10+
use Codewithkyrian\Transformers\Utils\InferenceSession;
1111

1212
class GPTJPretrainedModel extends PretrainedModel
1313
{

0 commit comments

Comments (0)