"""
LLM Chat with Human-in-the-Loop
Demonstrates an interactive chat where the workflow pauses for user input
between LLM responses using Conductor's WAIT task. The user types questions
in the terminal, and the LLM responds, maintaining conversation history.
Pipeline:
loop(wait_for_user --> collect_history --> chat_complete) --> summary
Requirements:
- Conductor server with AI/LLM support
- LLM provider named 'openai' with a valid API key configured
- export CONDUCTOR_SERVER_URL=http://localhost:7001/api
Usage:
python examples/agentic_workflows/llm_chat_human_in_loop.py
"""
import time

from conductor.client.automator.task_handler import TaskHandler
from conductor.client.configuration.configuration import Configuration
from conductor.client.http.models.task_result_status import TaskResultStatus
from conductor.client.orkes_clients import OrkesClients
from conductor.client.worker.worker_task import worker_task
from conductor.client.workflow.conductor_workflow import ConductorWorkflow
from conductor.client.workflow.task.do_while_task import LoopTask
from conductor.client.workflow.task.llm_tasks.llm_chat_complete import LlmChatComplete
from conductor.client.workflow.task.timeout_policy import TimeoutPolicy
from conductor.client.workflow.task.wait_task import WaitTask

# ---------------------------------------------------------------------------
# Configuration
# ---------------------------------------------------------------------------

LLM_PROVIDER = "openai"
LLM_MODEL = "gpt-4o-mini"

SYSTEM_PROMPT = (
    "You are a helpful assistant that knows about science. "
    "Answer questions clearly and concisely. If you don't know "
    "something, say so. Stay on topic."
)

# ---------------------------------------------------------------------------
# Workers
# ---------------------------------------------------------------------------
@worker_task(task_definition_name='human_chat_collect_history')
def collect_history(
    user_input: str = None,
    assistant_response: str = None,
    history: object = None,
) -> list:
    """Append the latest user and assistant messages to the conversation history.

    Seeds the history with the system prompt on the first turn, and handles
    the first loop iteration, where unresolved task references arrive as
    literal strings starting with '$'.
    """
    all_history = []
    if history and isinstance(history, list):
        for item in history:
            if isinstance(item, dict) and "role" in item and "message" in item:
                all_history.append(item)
    # On the first turn there is no history yet: seed it with the system prompt
    if not all_history:
        all_history.append({"role": "system", "message": SYSTEM_PROMPT})
    if assistant_response and not str(assistant_response).startswith("$"):
        all_history.append({"role": "assistant", "message": assistant_response})
    if user_input and not str(user_input).startswith("$"):
        all_history.append({"role": "user", "message": user_input})
    return all_history
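

# Illustrative call for a second loop iteration (all values made up):
#   collect_history(
#       user_input="does the same happen on Mars?",
#       assistant_response="The sky looks blue because ...",
#       history=[{"role": "system", "message": SYSTEM_PROMPT},
#                {"role": "user", "message": "why is the sky blue?"}],
#   )
# returns the existing history plus the new assistant and user messages, in order.
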
# ---------------------------------------------------------------------------
# Workflow
# ---------------------------------------------------------------------------
def create_human_chat_workflow(executor) -> ConductorWorkflow:
    wf = ConductorWorkflow(name="llm_chat_human_in_loop", version=1, executor=executor)

    # Wait for the user to type a question
    user_input = WaitTask(task_ref_name="user_input_ref")

    # Collect conversation history from the previous iteration
    collect_history_task = collect_history(
        task_ref_name="collect_history_ref",
        user_input="${user_input_ref.output.question}",
        history="${chat_complete_ref.input.messages}",
        assistant_response="${chat_complete_ref.output.result}",
    )

    # Chat completion; the system prompt is injected into the message history
    # by the collect_history worker above
    chat_complete = LlmChatComplete(
        task_ref_name="chat_complete_ref",
        llm_provider=LLM_PROVIDER,
        model=LLM_MODEL,
    )

    # Set messages as a dynamic reference, assigned directly so the constructor
    # does not iterate over the reference string as if it were a message list
    chat_complete.input_parameters["messages"] = "${collect_history_ref.output.result}"
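    # At run time Conductor resolves this reference to the list the
    # collect_history worker returned, e.g. (illustrative):
    #   [{"role": "system", "message": "You are a helpful assistant ..."},
    #    {"role": "user", "message": "why is the sky blue?"}]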

    # Loop: wait for user -> collect history -> respond
    loop_tasks = [user_input, collect_history_task, chat_complete]
    chat_loop = LoopTask(task_ref_name="loop", iterations=5, tasks=loop_tasks)

    wf >> chat_loop
    wf.timeout_seconds(300).timeout_policy(timeout_policy=TimeoutPolicy.TIME_OUT_WORKFLOW)
    return wf
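

# For reference, the registered definition looks roughly like this (abridged
# sketch; exact fields and the generated loopCondition come from the SDK):
#
#   {"name": "llm_chat_human_in_loop",
#    "tasks": [{"taskReferenceName": "loop", "type": "DO_WHILE",
#               "loopOver": [
#                   {"taskReferenceName": "user_input_ref", "type": "WAIT"},
#                   {"taskReferenceName": "collect_history_ref", "type": "SIMPLE"},
#                   {"taskReferenceName": "chat_complete_ref", "type": "LLM_CHAT_COMPLETE"}]}]}
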
# ---------------------------------------------------------------------------
# Main
# ---------------------------------------------------------------------------
def main():
    api_config = Configuration()
    clients = OrkesClients(configuration=api_config)
    workflow_executor = clients.get_workflow_executor()
    workflow_client = clients.get_workflow_client()
    task_client = clients.get_task_client()

    # Start workers
    task_handler = TaskHandler(
        workers=[], configuration=api_config, scan_for_annotated_workers=True,
    )
    task_handler.start_processes()

    try:
        wf = create_human_chat_workflow(workflow_executor)
        wf.register(overwrite=True)

        print("Interactive science chat (type 'quit' to exit)")
        print("=" * 50)

        workflow_run = wf.execute(
            wait_until_task_ref="user_input_ref",
            wait_for_seconds=1,
        )
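        # execute() with wait_until_task_ref returns once the workflow reaches
        # the WAIT task (or wait_for_seconds elapses), so we can start
        # prompting the user right away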
        workflow_id = workflow_run.workflow_id
        print(f"Workflow: {api_config.ui_host}/execution/{workflow_id}\n")

        while workflow_run.is_running():
            current = workflow_run.current_task
            if current and current.workflow_task.task_reference_name == "user_input_ref":
                # Show the previous assistant response if available
                assistant_task = workflow_run.get_task(task_reference_name="chat_complete_ref")
                if assistant_task and assistant_task.output_data.get("result"):
                    print(f"Assistant: {assistant_task.output_data['result'].strip()}\n")

                # Get user input
                question = input("You: ")
                if question.lower() in ("quit", "exit", "q"):
                    print("\nEnding conversation.")
                    break

                # Complete the WAIT task with the user's question
                task_client.update_task_sync(
                    workflow_id=workflow_id,
                    task_ref_name="user_input_ref",
                    status=TaskResultStatus.COMPLETED,
                    output={"question": question},
                )
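                # Equivalent raw HTTP call (assumed endpoint shape, Conductor's
                # update-task-by-ref API):
                #   POST {CONDUCTOR_SERVER_URL}/tasks/{workflow_id}/user_input_ref/COMPLETED
                #   with {"question": question} as the JSON body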

            time.sleep(0.5)
            workflow_run = workflow_client.get_workflow(workflow_id=workflow_id, include_tasks=True)

        # Show final assistant response
        if workflow_run.is_completed():
            assistant_task = workflow_run.get_task(task_reference_name="chat_complete_ref")
            if assistant_task and assistant_task.output_data.get("result"):
                print(f"Assistant: {assistant_task.output_data['result'].strip()}")
        print(f"\nFull conversation: {api_config.ui_host}/execution/{workflow_id}")
    finally:
        task_handler.stop_processes()


if __name__ == "__main__":
    main()