Skip to content

Commit 80c339e

Browse files
committed
chore: dir structure
1 parent 9d8e7fb commit 80c339e

35 files changed

Lines changed: 24 additions & 106 deletions
File renamed without changes.
File renamed without changes.

fast-api-react/fast-api-react/backend/poetry.lock renamed to fast-api-react/backend/poetry.lock

Lines changed: 4 additions & 4 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

fast-api-react/fast-api-react/backend/pyproject.toml renamed to fast-api-react/backend/pyproject.toml

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -12,7 +12,7 @@ hatchet = "src.workflows.main:start"
1212
[tool.poetry.dependencies]
1313
python = "^3.8"
1414
python-dotenv = "^1.0.0"
15-
hatchet-sdk = "^0.9.4"
15+
hatchet-sdk = "0.10.0"
1616
uvicorn = {extras = ["standard"], version = "^0.27.0"}
1717
fastapi = "^0.109.0"
1818
openai = "^1.11.0"
File renamed without changes.
File renamed without changes.
Lines changed: 18 additions & 22 deletions
Original file line number | Diff line number | Diff line change
@@ -1,14 +1,15 @@
11
from fastapi import FastAPI
22
from fastapi.middleware.cors import CORSMiddleware
33
from fastapi.responses import StreamingResponse
4-
5-
from .models import MessageRequest
6-
7-
from hatchet_sdk import Hatchet
84
import uvicorn
95
from dotenv import load_dotenv
106
import json
117

8+
from hatchet_sdk import Hatchet
9+
10+
from .models import MessageRequest
11+
12+
1213
load_dotenv()
1314

1415
app = FastAPI()
@@ -30,6 +31,18 @@
3031
)
3132

3233

34+
@app.post("/message")
35+
def message(data: MessageRequest):
36+
37+
messageId = hatchet.client.admin.run_workflow("GenerateWorkflow", {
38+
"request": data.model_dump()
39+
})
40+
41+
# save step message id -> workflowRunId
42+
43+
return {"workflowRunId": messageId}
44+
45+
3346
def event_stream_generator(workflowRunId):
3447
stream = hatchet.client.listener.stream(workflowRunId)
3548

@@ -39,33 +52,16 @@ def event_stream_generator(workflowRunId):
3952
"payload": event.payload,
4053
"workflowRunId": workflowRunId
4154
})
42-
43-
# stream.abort()
44-
print(data)
4555
yield "data: " + data + "\n\n"
4656

4757

4858
@app.get("/stream/{messageId}")
4959
async def stream(messageId: str):
5060
# message id -> workflowRunId
5161
workflowRunId = messageId
52-
# stream = hatchet.stream(workflowRunId)
5362
return StreamingResponse(event_stream_generator(workflowRunId), media_type='text/event-stream')
5463

5564

56-
@app.post("/message")
57-
def message(data: MessageRequest):
58-
print(data.model_dump())
59-
60-
messageId = hatchet.client.admin.run_workflow("GenerateWorkflow", {
61-
"request": data.model_dump()
62-
})
63-
64-
# save step message id -> workflowRunId
65-
66-
return {"workflowRunId": messageId}
67-
68-
6965
def start():
70-
"""Launched with `poetry run start` at root level"""
66+
"""Launched with `poetry run api` at root level"""
7167
uvicorn.run("src.api.main:app", host="0.0.0.0", port=8000, reload=True)
File renamed without changes.

fast-api-react/fast-api-react/backend/src/workflows/__init__.py renamed to fast-api-react/backend/src/workflows/__init__.py

File renamed without changes.

fast-api-react/fast-api-react/backend/src/workflows/generate.py renamed to fast-api-react/backend/src/workflows/generate.py

Lines changed: 0 additions & 33 deletions
Original file line number | Diff line number | Diff line change
@@ -8,39 +8,6 @@
88
openai = OpenAI()
99

1010

11-
@hatchet.workflow(on_events=["trigger:create"])
12-
class ManualTriggerWorkflow:
13-
@hatchet.step()
14-
def step1(self, context):
15-
messages = context.workflow_input()['request']['messages']
16-
print("> starting step1", messages)
17-
return {"status": "thinking"}
18-
19-
@hatchet.step(parents=["step1"])
20-
def step2(self, context):
21-
print("starting step2")
22-
return {"status": "writing a response"}
23-
24-
@hatchet.step(parents=["step2"], timeout='5m')
25-
def step3(self, context):
26-
messages = context.workflow_input()['request']['messages']
27-
prompt = "Compose a poem that explains the concept of recursion in programming."
28-
model = "gpt-3.5-turbo"
29-
30-
completion = openai.chat.completions.create(
31-
model=model,
32-
messages=[
33-
{"role": "system", "content": prompt},
34-
] + messages
35-
)
36-
37-
return {
38-
"complete": "true",
39-
"status": "idle",
40-
"message": completion.choices[0].message.content,
41-
}
42-
43-
4411
@hatchet.workflow(on_events=["question:create"])
4512
class GenerateWorkflow:
4613

0 commit comments

Comments (0)