@@ -111,32 +111,20 @@ async function main() {
   // Create LLM provider
   const llm = createOpenAIProvider(config.llm);
 
-  // Streaming state
-  let streamingStarted = false;
-
   // Create agent
   const agent = new Agent({
     llm,
     workspaceDir,
     callbacks: {
       onAssistantText: (text) => {
         if (text.trim() === "HEARTBEAT_OK") return;
-        if (streamingStarted) {
-          // Text was already streamed, just finish
-          process.stdout.write("\n\n");
-        } else {
-          // Non-streamed response: render markdown
-          const rendered = renderMarkdown(text);
-          console.log(chalk.green("\n🦐 ") + rendered);
-        }
-        streamingStarted = false;
+        // Render complete response with markdown formatting
+        const rendered = renderMarkdown(text);
+        console.log(chalk.green("\n🦐 ") + rendered);
       },
-      onTextChunk: (chunk) => {
-        if (!streamingStarted) {
-          process.stdout.write(chalk.green("\n🦐 "));
-          streamingStarted = true;
-        }
-        process.stdout.write(chunk);
+      onTextChunk: () => {
+        // Streaming runs under the hood, but we wait for the complete
+        // text to render markdown properly.
       },
       onToolCall: (name, args) => {
         console.log(
@@ -262,7 +250,6 @@ async function main() {
   });
 
   async function handleMessage(text: string): Promise<void> {
-    streamingStarted = false;
     console.log(chalk.dim("⏳ Thinking..."));
     try {
       await agent.chat(text);