-
Notifications
You must be signed in to change notification settings - Fork 14
Expand file tree
/
Copy pathllm-chat.ts
More file actions
123 lines (111 loc) · 3.18 KB
/
llm-chat.ts
File metadata and controls
123 lines (111 loc) · 3.18 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
/**
 * LLM Chat — automated AI conversation between two LLMs
 *
 * Demonstrates two LLMs exchanging a question and an answer using
 * llmChatCompleteTask. One acts as an interviewer, the other as a
 * subject matter expert. (Note: as written, each task runs once —
 * there is no loop task, so this is a single exchange rather than a
 * multi-turn do-while conversation.)
 *
 * Prerequisites:
 * - An LLM integration configured in Conductor (e.g., "openai_integration")
 * - A model available (e.g., "gpt-4o")
 *
 * Run:
 *   CONDUCTOR_SERVER_URL=http://localhost:8080 npx ts-node examples/agentic-workflows/llm-chat.ts
 */
import {
OrkesClients,
ConductorWorkflow,
llmChatCompleteTask,
inlineTask,
setVariableTask,
Role,
} from "../../src/sdk";
/**
 * Builds, registers, and executes the example workflow, then prints the
 * run status and output. Exits the process when done.
 */
async function main() {
  // Client bootstrap — reads connection settings (e.g. CONDUCTOR_SERVER_URL)
  // from the environment.
  const clients = await OrkesClients.from();
  const workflowClient = clients.getWorkflowClient();
  // LLM integration name and model are overridable via env vars.
  const provider = process.env.LLM_PROVIDER ?? "openai_integration";
  const model = process.env.LLM_MODEL ?? "gpt-4o";
  // NOTE(review): despite the file header, no do-while/loop task is added
  // below — every task executes exactly once, so this workflow performs a
  // single question/answer exchange. Confirm whether a loop was intended.
  // NOTE(review): the `conversation` variable is declared here but never
  // appended to anywhere in the workflow — dead state, or missing logic?
  const wf = new ConductorWorkflow(workflowClient, "llm_chat_example")
    .description("Two LLMs having a multi-turn conversation")
    .variables({ turnCount: 0, conversation: [] });
  // Initialize conversation with a topic
  wf.add(
    setVariableTask("init_ref", {
      turnCount: 0,
      topic: "${workflow.input.topic}",
    })
  );
  // Interviewer asks a question
  wf.add(
    llmChatCompleteTask("interviewer_ref", provider, model, {
      messages: [
        {
          role: Role.SYSTEM,
          message:
            "You are a curious interviewer. Ask thoughtful, concise questions about the topic. Keep responses under 100 words.",
        },
        {
          role: Role.USER,
          message:
            "Topic: ${workflow.input.topic}. This is turn ${init_ref.output.turnCount}. Ask your next question.",
        },
      ],
      // Higher temperature for more varied questions.
      temperature: 0.7,
      maxTokens: 200,
    })
  );
  // Expert responds — the interviewer's generated question is fed in
  // verbatim as the user message.
  wf.add(
    llmChatCompleteTask("expert_ref", provider, model, {
      messages: [
        {
          role: Role.SYSTEM,
          message:
            "You are a subject matter expert. Give clear, informative answers. Keep responses under 150 words.",
        },
        {
          role: Role.USER,
          message: "${interviewer_ref.output.result}",
        },
      ],
      // Lower temperature for more factual answers.
      temperature: 0.5,
      maxTokens: 300,
    })
  );
  // Track conversation turns
  // NOTE(review): Conductor inline tasks see only their declared
  // inputParameters via `$`, and none are passed to this helper call —
  // so $.init_ref, $.interviewer_ref, $.expert_ref, and $.maxTurns are
  // likely undefined at evaluation time (e.g. `done` would compare
  // against undefined). Verify whether this SDK's inlineTask wires task
  // outputs / workflow input into `$` automatically; if not, the script
  // needs explicit inputParameters.
  wf.add(
    inlineTask(
      "track_ref",
      `(function() {
  var turn = ($.init_ref ? $.init_ref.output.turnCount : 0) + 1;
  return {
    turnCount: turn,
    interviewer: $.interviewer_ref.output.result,
    expert: $.expert_ref.output.result,
    done: turn >= $.maxTurns
  };
})()`,
      "javascript"
    )
  );
  // Workflow-level outputs, resolved from task outputs at completion.
  wf.outputParameters({
    topic: "${workflow.input.topic}",
    turns: "${track_ref.output.result.turnCount}",
    lastQuestion: "${interviewer_ref.output.result}",
    lastAnswer: "${expert_ref.output.result}",
  });
  // register(true) — presumably "overwrite if already registered";
  // confirm against the SDK docs.
  await wf.register(true);
  console.log("Registered workflow:", wf.getName());
  // Execute synchronously and report the result.
  // NOTE(review): maxTurns is passed as workflow input but is only read
  // (if at all) by the inline task above — see the note there.
  const run = await wf.execute({
    topic: "The future of quantum computing",
    maxTurns: 3,
  });
  console.log("Status:", run.status);
  console.log("Output:", JSON.stringify(run.output, null, 2));
  process.exit(0);
}
// Entry point: surface any unhandled failure and exit non-zero so shell
// scripts and CI can detect the error.
main().catch((error) => {
  console.error(error);
  process.exit(1);
});