-
Notifications
You must be signed in to change notification settings - Fork 472
Expand file tree
/
Copy path: sampling_stdio.rs
More file actions
120 lines (105 loc) · 3.83 KB
/
sampling_stdio.rs
File metadata and controls
120 lines (105 loc) · 3.83 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
use anyhow::Result;
use rmcp::{
ClientHandler, ServiceExt,
model::*,
object,
service::{RequestContext, RoleClient},
transport::{ConfigureCommandExt, TokioChildProcess},
};
use tokio::process::Command;
use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
/// Simple Sampling Demo Client
///
/// This client demonstrates how to handle sampling requests from servers.
/// It includes a mock LLM that generates simple responses.
/// Run with: cargo run -p mcp-client-examples --example clients_sampling_stdio
#[derive(Clone, Debug, Default)]
pub struct SamplingDemoClient;

impl SamplingDemoClient {
    /// Mock LLM function that generates responses based on the input.
    ///
    /// In an actual implementation this would call a real LLM service;
    /// here both the conversation messages and the optional system prompt
    /// are ignored and a canned reply is returned.
    fn mock_llm_response(
        &self,
        _messages: &[SamplingMessage],
        _system_prompt: Option<&str>,
    ) -> String {
        // Fixed grammar in the canned reply ("It just" -> "It's just").
        "It's just a mock response".to_string()
    }
}
impl ClientHandler for SamplingDemoClient {
    /// Handle a `sampling/createMessage` request from the server by
    /// producing a canned assistant reply through the mock LLM.
    async fn create_message(
        &self,
        params: CreateMessageRequestParams,
        _context: RequestContext<RoleClient>,
    ) -> Result<CreateMessageResult, ErrorData> {
        tracing::info!("Received sampling request with {:?}", params);

        // Delegate to the mock LLM; a real client would call a model here.
        let system_prompt = params.system_prompt.as_deref();
        let reply = self.mock_llm_response(&params.messages, system_prompt);

        // Wrap the text in an assistant message and report a normal stop.
        let message = SamplingMessage::assistant_text(reply);
        let result = CreateMessageResult::new(message, "mock_llm".to_string())
            .with_stop_reason(CreateMessageResult::STOP_REASON_END_TURN);
        Ok(result)
    }
}
#[tokio::main]
async fn main() -> Result<()> {
    // Initialize logging: honour RUST_LOG if set, otherwise default to
    // `info` globally with `debug` for this crate.
    tracing_subscriber::registry()
        .with(
            tracing_subscriber::EnvFilter::try_from_default_env()
                .unwrap_or_else(|_| format!("info,{}=debug", env!("CARGO_CRATE_NAME")).into()),
        )
        .with(tracing_subscriber::fmt::layer())
        .init();
    tracing::info!("Starting Sampling Demo Client");
    let client = SamplingDemoClient;
    // Locate the sibling `servers` directory relative to this crate.
    // env!("CARGO_MANIFEST_DIR") is resolved at compile time, so the only
    // runtime failure here is the manifest path having no parent — the
    // previous message ("CARGO_MANIFEST_DIR is not set") was misleading.
    let servers_dir = std::path::Path::new(env!("CARGO_MANIFEST_DIR"))
        .parent()
        .expect("CARGO_MANIFEST_DIR has no parent directory")
        .join("servers");
    // Spawn the sampling server example as a child process and connect to
    // it over stdio.
    let client = client
        .serve(TokioChildProcess::new(Command::new("cargo").configure(
            |cmd| {
                cmd.arg("run")
                    .arg("--example")
                    .arg("servers_sampling_stdio")
                    .current_dir(servers_dir);
            },
        ))?)
        .await
        .inspect_err(|e| {
            tracing::error!("client error: {:?}", e);
        })?;
    // Give the child server a moment to finish initializing.
    tokio::time::sleep(tokio::time::Duration::from_millis(1000)).await;
    // Get server info
    let server_info = client.peer_info();
    tracing::info!("Connected to server: {server_info:#?}");
    // List available tools, then exercise the `ask_llm` tool, which causes
    // the server to send a sampling request back to this client (handled by
    // `create_message` above).
    match client.list_all_tools().await {
        Ok(tools) => {
            tracing::info!("Available tools: {tools:#?}");
            tracing::info!("Testing ask_llm tool...");
            match client
                .call_tool(
                    CallToolRequestParams::new("ask_llm").with_arguments(object!({
                        "question": "Hello world"
                    })),
                )
                .await
            {
                Ok(result) => tracing::info!("Ask LLM result: {result:#?}"),
                Err(e) => tracing::error!("Ask LLM error: {e}"),
            }
        }
        Err(e) => tracing::error!("Failed to list tools: {e}"),
    }
    tracing::info!("Sampling demo completed successfully!");
    // Brief grace period so in-flight logging/IO settles before shutdown.
    tokio::time::sleep(tokio::time::Duration::from_millis(1000)).await;
    client.cancel().await?;
    Ok(())
}