Skip to content

Commit 564d019

Browse files
author
Andrei Bratu
committed
test for prompt decorator
1 parent 0d2443c commit 564d019

2 files changed

Lines changed: 242 additions & 0 deletions

File tree

Lines changed: 68 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,68 @@
1+
import {
2+
TestPrompt,
3+
TestSetup,
4+
cleanupTestEnvironment,
5+
createTestPrompt,
6+
deleteTestPrompt,
7+
setupTestEnvironment,
8+
testPromptDecorator,
9+
} from "./test-utils";
10+
11+
// Long timeout per test
12+
jest.setTimeout(30 * 1000);
13+
14+
describe("decorators", () => {
15+
let testSetup: TestSetup;
16+
let testPrompt: TestPrompt | null = null;
17+
18+
beforeAll(async () => {
19+
testSetup = await setupTestEnvironment("decorators");
20+
});
21+
22+
afterAll(async () => {
23+
await cleanupTestEnvironment(testSetup);
24+
});
25+
26+
afterEach(async () => {
27+
// Clean up individual test resources
28+
if (testPrompt) {
29+
await deleteTestPrompt(testSetup, testPrompt.id);
30+
testPrompt = null;
31+
}
32+
});
33+
34+
it("should create a prompt log when using the decorator", async () => {
35+
try {
36+
// Create test prompt
37+
testPrompt = await createTestPrompt(testSetup);
38+
39+
// Check initial version count
40+
const promptVersionsResponse =
41+
await testSetup.humanloopClient.prompts.listVersions(testPrompt.id);
42+
expect(promptVersionsResponse.records.length).toBe(1);
43+
44+
// Test the prompt decorator
45+
await testPromptDecorator(testSetup, testPrompt);
46+
47+
// Verify a new version was created
48+
const updatedPromptVersionsResponse =
49+
await testSetup.humanloopClient.prompts.listVersions(testPrompt.id);
50+
expect(updatedPromptVersionsResponse.records.length).toBe(2);
51+
52+
// Verify logs were created
53+
const logsResponse = await testSetup.humanloopClient.logs.list({
54+
fileId: testPrompt.id,
55+
page: 1,
56+
size: 50,
57+
});
58+
expect(logsResponse.data.length).toBe(1);
59+
} catch (error) {
60+
// Make sure to clean up if the test fails
61+
if (testPrompt) {
62+
await deleteTestPrompt(testSetup, testPrompt.id);
63+
testPrompt = null;
64+
}
65+
throw error;
66+
}
67+
});
68+
});

tests/integration/test-utils.ts

Lines changed: 174 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,174 @@
1+
import dotenv from "dotenv";
2+
import { OpenAI } from "openai";
3+
import { v4 as uuidv4 } from "uuid";
4+
5+
import { PromptRequest, PromptResponse } from "../../src/api";
6+
import { HumanloopClient } from "../../src/humanloop.client";
7+
8+
/** Identifiers of a Humanloop directory created to isolate one test run. */
export interface TestIdentifiers {
  id: string;
  path: string;
}

/** Handle to a prompt created during a test, plus the raw upsert response. */
export interface TestPrompt {
  id: string;
  path: string;
  response: PromptResponse;
}

/** Shared fixtures for one integration-test suite, built by setupTestEnvironment(). */
export interface TestSetup {
  // Directory that namespaces all files created by this run.
  sdkTestDir: TestIdentifiers;
  // Baseline prompt configuration reused by createTestPrompt().
  testPromptConfig: PromptRequest;
  openaiApiKey: string;
  humanloopClient: HumanloopClient;
}
25+
26+
export function readEnvironment(): void {
27+
if (![process.env.HUMANLOOP_API_KEY, process.env.OPENAI_API_KEY].every(Boolean)) {
28+
// Testing locally not in CI, running dotenv.config() would override the secrets set for GitHub Action
29+
dotenv.config({});
30+
}
31+
if (!process.env.HUMANLOOP_API_KEY) {
32+
throw new Error("HUMANLOOP_API_KEY is not set");
33+
}
34+
if (!process.env.OPENAI_API_KEY) {
35+
throw new Error("OPENAI_API_KEY is not set for integration tests");
36+
}
37+
}
38+
39+
export async function setupTestEnvironment(testName: string): Promise<TestSetup> {
40+
readEnvironment();
41+
42+
const openaiApiKey = process.env.OPENAI_API_KEY!;
43+
const humanloopClient = new HumanloopClient({
44+
apiKey: process.env.HUMANLOOP_API_KEY,
45+
instrumentProviders: {
46+
OpenAI: OpenAI,
47+
},
48+
});
49+
50+
// Create a test directory
51+
const directoryPath = `SDK_TEST_${testName}_${uuidv4()}`;
52+
const response = await humanloopClient.directories.create({
53+
path: directoryPath,
54+
});
55+
56+
const sdkTestDir = {
57+
id: response.id,
58+
path: response.path,
59+
};
60+
61+
// Create test prompt config
62+
const testPromptConfig: PromptRequest = {
63+
provider: "openai",
64+
model: "gpt-4o-mini",
65+
temperature: 0.5,
66+
template: [
67+
{
68+
role: "system",
69+
content: "You are a helpful assistant. Answer concisely.",
70+
},
71+
{
72+
role: "user",
73+
content: "{{question}}",
74+
},
75+
],
76+
};
77+
78+
return {
79+
sdkTestDir,
80+
testPromptConfig,
81+
openaiApiKey,
82+
humanloopClient,
83+
};
84+
}
85+
86+
/**
87+
* Creates a test prompt in the specified test environment
88+
*/
89+
export async function createTestPrompt(
90+
setup: TestSetup,
91+
name: string = "test_prompt",
92+
customConfig?: Partial<PromptRequest>,
93+
): Promise<TestPrompt> {
94+
const promptPath = `${setup.sdkTestDir.path}/${name}`;
95+
const config = customConfig
96+
? { ...setup.testPromptConfig, ...customConfig }
97+
: setup.testPromptConfig;
98+
99+
const promptResponse = await setup.humanloopClient.prompts.upsert({
100+
path: promptPath,
101+
...config,
102+
});
103+
104+
return {
105+
id: promptResponse.id,
106+
path: promptPath,
107+
response: promptResponse,
108+
};
109+
}
110+
111+
/**
112+
* Creates a base function for LLM calls that can be decorated
113+
*/
114+
export function createBaseLLMFunction(setup: TestSetup, model: string = "gpt-4o-mini") {
115+
return async (question: string): Promise<string> => {
116+
const openaiClient = new OpenAI({ apiKey: setup.openaiApiKey });
117+
118+
const response = await openaiClient.chat.completions.create({
119+
model: model,
120+
messages: [{ role: "user", content: question }],
121+
});
122+
123+
return response.choices[0].message.content || "";
124+
};
125+
}
126+
127+
/**
128+
* Applies the prompt decorator to a function and tests it
129+
*/
130+
export async function testPromptDecorator(
131+
setup: TestSetup,
132+
prompt: TestPrompt,
133+
input: string = "What is the capital of the France?",
134+
expectedSubstring: string = "paris",
135+
): Promise<void> {
136+
// Create the base function
137+
const myPromptBase = createBaseLLMFunction(setup);
138+
139+
// Apply the higher-order function instead of decorator
140+
const myPrompt = setup.humanloopClient.prompt({
141+
path: prompt.path,
142+
callable: myPromptBase,
143+
});
144+
145+
// Call the decorated function
146+
const result = await myPrompt(input);
147+
if (result) {
148+
expect(result.toLowerCase()).toContain(expectedSubstring.toLowerCase());
149+
} else {
150+
throw new Error("Expected result to be defined");
151+
}
152+
153+
// Wait for 5 seconds for the log to be created
154+
await new Promise((resolve) => setTimeout(resolve, 5000));
155+
}
156+
157+
/**
158+
* Deletes a test prompt
159+
*/
160+
export async function deleteTestPrompt(
161+
setup: TestSetup,
162+
promptId: string,
163+
): Promise<void> {
164+
if (promptId) {
165+
await setup.humanloopClient.prompts.delete(promptId);
166+
}
167+
}
168+
169+
export async function cleanupTestEnvironment(setup: TestSetup): Promise<void> {
170+
// Clean up the test directory
171+
if (setup.sdkTestDir.id) {
172+
await setup.humanloopClient.directories.delete(setup.sdkTestDir.id);
173+
}
174+
}

0 commit comments

Comments
 (0)