examples/mastra.ts — 61 additions & 0 deletions
@@ -0,0 +1,61 @@
import { Agent } from '@mastra/core/agent';
import { AgentRunServer, type AgentRequest } from '../src/server';
import {
  MastraConverter,
  type AgentEventItem,
  model,
  toolset,
} from '../src/integration/mastra';

import { logger } from '../src/utils/log';

const mastraAgent = new Agent({
  id: 'run_agent',
  name: 'AgentRun',
  instructions: `
You are an intelligent assistant that helps users complete all kinds of tasks. Every reply you produce must be written in reverse.

For example, if the user says "你好", reply "?么的您助帮以可么什有,好您" (the reverse of "您好,有什么可以帮助您的么?").
  `.trim(),
  model: () => model({ name: 'ohyee-test' }),
  tools: () => toolset({ name: 'start-mcp-time-ggda' }),
});

// Bridge an incoming AgentRun request to the Mastra agent: forward the chat
// messages, then convert each Mastra stream chunk into AgentRun events.
async function* invokeAgent(
  request: AgentRequest,
): AsyncGenerator<AgentEventItem> {
  const converter = new MastraConverter();
  const mastraStream = await mastraAgent.stream(
    request.messages.map(
      (msg) =>
        ({
          role: msg.role,
          content: msg.content || '',
        }) as any, // loosen typing to match Mastra's expected message shape
    ),
  );
  for await (const chunk of mastraStream.fullStream) {
    const events = converter.convert(chunk);

    for (const event of events) {
      yield event;
    }
  }
}

const server = new AgentRunServer({
  invokeAgent,
  config: { corsOrigins: ['*'] },
});

// Print ready-to-run curl examples for both endpoints.
logger.info(`
curl http://127.0.0.1:9000/openai/v1/chat/completions -X POST \\
  -H "Content-Type: application/json" \\
  -d '{"messages": [{"role": "user", "content": "Hello!"}], "stream": true}'

curl http://127.0.0.1:9000/ag-ui/agent -X POST \\
  -H "Content-Type: application/json" \\
  -d '{"messages": [{"role": "user", "content": "Hello!"}]}'
`);

server.start({ port: 9000 });
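
For reference, a minimal TypeScript client for the streaming endpoint printed above might look like the sketch below. It assumes the /openai/v1/chat/completions route emits OpenAI-style SSE "data:" lines (the stream: true flag in the curl example suggests as much); the parsing would need adjusting if AgentRunServer frames its stream differently.

// Sketch: consume the OpenAI-compatible streaming endpoint from TypeScript.
// Assumption: the server emits SSE lines of the form `data: {json}` ending
// with `data: [DONE]`, as OpenAI-style servers conventionally do.
async function streamChat(prompt: string): Promise<void> {
  const res = await fetch('http://127.0.0.1:9000/openai/v1/chat/completions', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      messages: [{ role: 'user', content: prompt }],
      stream: true,
    }),
  });
  const reader = res.body!.getReader();
  const decoder = new TextDecoder();
  let buffer = '';
  for (;;) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    let newline: number;
    while ((newline = buffer.indexOf('\n')) >= 0) {
      const line = buffer.slice(0, newline).trim();
      buffer = buffer.slice(newline + 1);
      if (!line.startsWith('data:')) continue;
      const payload = line.slice('data:'.length).trim();
      if (payload === '[DONE]') return;
      // The chunk shape below follows the OpenAI streaming format; this is
      // an assumption about AgentRunServer's compatibility layer.
      const delta = JSON.parse(payload)?.choices?.[0]?.delta?.content;
      if (delta) process.stdout.write(delta);
    }
  }
}

streamChat('Hello!').catch(console.error);

The /ag-ui/agent endpoint accepts the same messages payload, as the second curl command shows; a client for it would presumably swap the URL and the response parsing.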
examples/model.ts — 19 additions & 11 deletions
@@ -13,10 +13,10 @@
 * npm run example:model
 */

-import { ModelClient, ResourceAlreadyExistError, ResourceNotExistError, Status, BackendType, ModelType, ModelService, ModelProxy } from '../src/index';
-import type { ModelServiceCreateInput, ModelServiceUpdateInput, ModelProxyCreateInput, ModelProxyUpdateInput, ProviderSettings, ProxyConfig } from '../src/index';
-import { logger } from '../src/utils/log';
+import type { ModelProxyCreateInput, ModelServiceCreateInput, ProviderSettings, ProxyConfig } from '../src/index';
+import { ModelClient, ModelProxy, ModelService, ModelType, ResourceAlreadyExistError, ResourceNotExistError, Status } from '../src/index';
import { Config } from '../src/utils/config';
+import { logger } from '../src/utils/log';

// Logger helper
function log(message: string, ...args: unknown[]) {
@@ -68,7 +68,7 @@ async function createOrGetModelService(): Promise<ModelService> {

  // 等待就绪 / Wait for ready
  await ms.waitUntilReadyOrFailed({
-    beforeCheck: (service: ModelService) =>
+    callback: (service) =>
      log(` 当前状态 / Current status: ${service.status}`),
  });

@@ -112,7 +112,7 @@ async function updateModelService(ms: ModelService): Promise<void> {
async function listModelServices(): Promise<void> {
  log('枚举资源列表 / Listing resources');

-  const services = await ModelService.list({ modelType: ModelType.LLM });
+  const services = await ModelService.list({
+    input: {
+      modelType: ModelType.LLM
+    }
+  });
  log(
    `共有 ${services.length} 个资源,分别为 / Total ${services.length} resources:`,
    services.map((s) => s.modelServiceName)
@@ -131,8 +135,10 @@ async function invokeModelService(ms: ModelService): Promise<void> {
  });

  // 流式输出 / Stream output
-  for await (const chunk of result.textStream) {
-    process.stdout.write(chunk);
+  if ('textStream' in result && result.textStream) {
+    for await (const chunk of result.textStream) {
+      process.stdout.write(chunk);
+    }
  }
  logger.info(''); // 换行 / Newline
}
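
The new guard suggests the inference result is now a union of streaming and non-streaming shapes, so the example narrows on textStream before iterating. A minimal sketch of that narrowing in isolation (only textStream is confirmed by this diff; the fallback branch is an assumption):

// Narrow an (assumed) streaming/non-streaming result union before iterating.
type MaybeStreaming = { textStream?: AsyncIterable<string> };

async function printResult(result: MaybeStreaming): Promise<void> {
  if ('textStream' in result && result.textStream) {
    for await (const chunk of result.textStream) {
      process.stdout.write(chunk); // stream chunks as they arrive
    }
  } else {
    console.log(result); // fallback for a non-streaming result (assumption)
  }
}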
@@ -198,7 +204,7 @@ async function createOrGetModelProxy(): Promise<ModelProxy> {

  // 等待就绪 / Wait for ready
  await mp.waitUntilReadyOrFailed({
-    beforeCheck: (proxy: ModelProxy) =>
+    callback: (proxy) =>
      log(` 当前状态 / Current status: ${proxy.status}`),
  });

@@ -257,14 +263,16 @@ async function listModelProxies(): Promise<void> {
async function invokeModelProxy(mp: ModelProxy): Promise<void> {
  log('调用模型代理进行推理 / Invoking model proxy for inference');

-  const result = await mp.completions({
+  const result = await mp.completion({
    messages: [{ role: 'user', content: '你好,请介绍一下你自己' }],
    stream: true,
  });

  // 流式输出 / Stream output
-  for await (const chunk of result.textStream) {
-    process.stdout.write(chunk);
+  if ('textStream' in result && result.textStream) {
+    for await (const chunk of result.textStream) {
+      process.stdout.write(chunk);
+    }
  }
  logger.info(''); // 换行 / Newline
}