Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
17 changes: 14 additions & 3 deletions apps/mongostory/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ MongoStory is a cloud-native platform designed to empower content creators, edit
- **API Routes**: Next.js API routes for server-side functionality
- **Database**: MongoDB for flexible document storage
- **Vector Search**: MongoDB Atlas Vector Search for semantic content operations
- **AI Integration**: Integration with AI models via AI SDK - xAI (Grok)
- **AI Integration**: Integration with AI models via AI SDK - xAI (Grok) or [MiniMax](https://platform.minimax.io/) (M2.7)

### AI Integration
- **Content Generation**: AI-powered content creation and suggestions
Expand Down Expand Up @@ -71,7 +71,7 @@ MongoStory leverages MongoDB's document model for flexible content storage and i
- Node.js 18+ and npm/yarn
- MongoDB Atlas account
- AI API keys:
- - Grok AI API key
- - Grok AI API key (or MiniMax API key)
- - Voyage AI API Key.

### Installation
Expand All @@ -90,10 +90,21 @@ npm install
```
openssl rand -base64 32
```
- `XAI_API_KEY`: API key for AI services
- `XAI_API_KEY`: API key for AI services (default provider)
- `VOYAGE_API_KEY`: API key for vector embeddings
- `NEXT_PUBLIC_APP_URL`: The main domain of the app (e.g. http://localhost:3000).

#### Using MiniMax as the LLM Provider

To use [MiniMax](https://platform.minimax.io/) instead of xAI/Grok, set the following environment variables:

```
LLM_PROVIDER=minimax
MINIMAX_API_KEY=your_minimax_api_key
```

Supported MiniMax models: `MiniMax-M2.7` (default), `MiniMax-M2.7-highspeed`.

### Trigger for content embedding:

Set a `VOYAGE_API_KEY` (stored as a Value + Secret) on the Triggers app.
Expand Down
115 changes: 115 additions & 0 deletions apps/mongostory/__tests__/llm-provider.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,115 @@
import { describe, it, expect, vi, beforeEach, afterEach } from 'vitest';

// Mock @ai-sdk/xai so getLLMModel never constructs a real provider client.
const mockXaiModel = { modelId: 'grok-model', provider: 'xai' };
vi.mock('@ai-sdk/xai', () => ({
  xai: vi.fn().mockReturnValue(mockXaiModel),
}));

// Mock @ai-sdk/openai — the OpenAI-compatible factory used for MiniMax.
const mockMinimaxModel = { modelId: 'minimax-model', provider: 'minimax' };
const mockProviderFn = vi.fn().mockReturnValue(mockMinimaxModel);
vi.mock('@ai-sdk/openai', () => ({
  createOpenAI: vi.fn().mockReturnValue(mockProviderFn),
}));

describe('getLLMModel', () => {
  const originalEnv = process.env;

  beforeEach(() => {
    // Re-import the module under test fresh in each test so it re-reads env vars.
    vi.resetModules();
    // Clear mock call history so toHaveBeenCalledWith(...) cannot be satisfied
    // by a call made in a PREVIOUS test (resetModules does not clear mocks).
    // clearAllMocks preserves the mockReturnValue implementations set above.
    vi.clearAllMocks();
    process.env = { ...originalEnv };
  });

  afterEach(() => {
    process.env = originalEnv;
  });

  it('defaults to xai provider when LLM_PROVIDER is not set', async () => {
    delete process.env.LLM_PROVIDER;
    const { getLLMModel } = await import('../lib/llm-provider');
    const { xai } = await import('@ai-sdk/xai');

    const model = getLLMModel();

    expect(model).toBeDefined();
    expect(xai).toHaveBeenCalledWith('grok-2-1212');
  });

  it('uses xai provider when LLM_PROVIDER is "xai"', async () => {
    process.env.LLM_PROVIDER = 'xai';
    const { getLLMModel } = await import('../lib/llm-provider');
    const { xai } = await import('@ai-sdk/xai');

    getLLMModel();

    expect(xai).toHaveBeenCalledWith('grok-2-1212');
  });

  it('uses minimax provider when LLM_PROVIDER is "minimax"', async () => {
    process.env.LLM_PROVIDER = 'minimax';
    process.env.MINIMAX_API_KEY = 'test-minimax-key';
    const { getLLMModel } = await import('../lib/llm-provider');
    const { createOpenAI } = await import('@ai-sdk/openai');

    getLLMModel();

    expect(createOpenAI).toHaveBeenCalledWith(
      expect.objectContaining({
        baseURL: 'https://api.minimax.io/v1',
        apiKey: 'test-minimax-key',
      })
    );
  });

  it('uses custom MINIMAX_BASE_URL when provided', async () => {
    process.env.LLM_PROVIDER = 'minimax';
    process.env.MINIMAX_API_KEY = 'test-key';
    process.env.MINIMAX_BASE_URL = 'https://api.minimaxi.com/v1';
    const { getLLMModel } = await import('../lib/llm-provider');
    const { createOpenAI } = await import('@ai-sdk/openai');

    getLLMModel();

    expect(createOpenAI).toHaveBeenCalledWith(
      expect.objectContaining({
        baseURL: 'https://api.minimaxi.com/v1',
        apiKey: 'test-key',
      })
    );
  });

  it('uses MiniMax-M2.7 as default model for minimax provider', async () => {
    process.env.LLM_PROVIDER = 'minimax';
    process.env.MINIMAX_API_KEY = 'test-key';
    const { getLLMModel } = await import('../lib/llm-provider');
    const { createOpenAI } = await import('@ai-sdk/openai');

    getLLMModel();

    // Mocks are cleared per-test, so results[0] is THIS test's factory call.
    const providerFn = (createOpenAI as any).mock.results[0].value;
    expect(providerFn).toHaveBeenCalledWith('MiniMax-M2.7');
  });

  it('uses custom modelId when provided for minimax', async () => {
    process.env.LLM_PROVIDER = 'minimax';
    process.env.MINIMAX_API_KEY = 'test-key';
    const { getLLMModel } = await import('../lib/llm-provider');
    const { createOpenAI } = await import('@ai-sdk/openai');

    getLLMModel({ modelId: 'MiniMax-M2.7-highspeed' });

    const providerFn = (createOpenAI as any).mock.results[0].value;
    expect(providerFn).toHaveBeenCalledWith('MiniMax-M2.7-highspeed');
  });

  it('uses custom modelId when provided for xai', async () => {
    delete process.env.LLM_PROVIDER;
    const { getLLMModel } = await import('../lib/llm-provider');
    const { xai } = await import('@ai-sdk/xai');

    getLLMModel({ modelId: 'grok-3' });

    expect(xai).toHaveBeenCalledWith('grok-3');
  });
});
88 changes: 88 additions & 0 deletions apps/mongostory/__tests__/minimax-integration.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,88 @@
import { describe, it, expect } from 'vitest';

const API_KEY = process.env.MINIMAX_API_KEY;
const BASE_URL = process.env.MINIMAX_BASE_URL || 'https://api.minimax.io/v1';

// Live integration tests against the MiniMax OpenAI-compatible endpoint.
// Skipped entirely unless MINIMAX_API_KEY is set in the environment.
describe.skipIf(!API_KEY)('MiniMax Integration', () => {
  // Shared POST helper for /chat/completions — avoids triplicating the
  // fetch/headers/serialization boilerplate in every test.
  function chatRequest(payload: Record<string, unknown>): Promise<Response> {
    return fetch(`${BASE_URL}/chat/completions`, {
      method: 'POST',
      headers: {
        'Content-Type': 'application/json',
        'Authorization': `Bearer ${API_KEY}`,
      },
      body: JSON.stringify(payload),
    });
  }

  it('completes a basic chat request with MiniMax-M2.7', async () => {
    const response = await chatRequest({
      model: 'MiniMax-M2.7',
      messages: [{ role: 'user', content: 'Say "test passed" and nothing else.' }],
      max_tokens: 20,
      temperature: 1.0,
    });

    expect(response.ok).toBe(true);
    const data = await response.json();
    expect(data.choices).toBeDefined();
    expect(data.choices.length).toBeGreaterThan(0);
    expect(data.choices[0].message.content).toBeTruthy();
  }, 30000);

  it('generates structured content analysis with MiniMax-M2.7', async () => {
    const response = await chatRequest({
      model: 'MiniMax-M2.7',
      messages: [
        {
          role: 'system',
          content: 'You are a content quality expert. Return a JSON object with readabilityScore (1-10), clarity (1-10), and suggestions (array of strings).',
        },
        {
          role: 'user',
          content: 'Evaluate the quality of this content: "MongoDB is a popular NoSQL database that stores data in flexible, JSON-like documents."',
        },
      ],
      max_tokens: 200,
      temperature: 1.0,
    });

    expect(response.ok).toBe(true);
    const data = await response.json();
    expect(data.choices[0].message.content).toBeTruthy();
  }, 30000);

  it('handles streaming responses', async () => {
    const response = await chatRequest({
      model: 'MiniMax-M2.7',
      messages: [{ role: 'user', content: 'Count from 1 to 5.' }],
      max_tokens: 50,
      stream: true,
      temperature: 1.0,
    });

    expect(response.ok).toBe(true);
    const reader = response.body!.getReader();
    const decoder = new TextDecoder();
    // Count SSE "data:" events rather than network reads: a short completion
    // can legitimately arrive in a single chunk, which would make a
    // per-read counter flaky even though streaming worked correctly.
    let dataEvents = 0;

    while (true) {
      const { done, value } = await reader.read();
      if (done) break;
      const text = decoder.decode(value, { stream: true });
      dataEvents += (text.match(/data:/g) ?? []).length;
    }

    expect(dataEvents).toBeGreaterThan(1);
  }, 30000);
});
4 changes: 2 additions & 2 deletions apps/mongostory/app/api/clusters/regenerate/route.ts
Original file line number Diff line number Diff line change
Expand Up @@ -3,8 +3,8 @@ import clientPromise from "@/lib/mongodb"
import { ObjectId } from "mongodb"
import { generateEmbedding } from "@/lib/embeddings"
import { performVectorSearch } from "@/lib/vector-search"
import { xai } from "@ai-sdk/xai"
import { generateText } from "ai"
import { getLLMModel } from "@/lib/llm-provider"

export async function POST() {
try {
Expand Down Expand Up @@ -131,7 +131,7 @@ async function generateClusterLabel(keywords: string[], contentSamples: string[]
// Take a sample of content to provide context
const contentSample = contentSamples.slice(0, 3).join("\n\n").substring(0, 1000)

const model = xai("grok-2-1212")
const model = getLLMModel()
const { text } = await generateText({
model,
prompt: `Generate a concise, descriptive label (3-5 words) for a content cluster with these keywords: ${keywords.join(", ")}.
Expand Down
4 changes: 2 additions & 2 deletions apps/mongostory/app/api/content/[id]/ai-revise/route.ts
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import { NextResponse } from "next/server"
import { xai } from "@ai-sdk/xai"
import { generateText } from "ai"
import clientPromise from "@/lib/mongodb"
import { getLLMModel } from "@/lib/llm-provider"
import { ObjectId } from "mongodb"

export async function POST(req: Request, { params }: { params: { id: string } }) {
Expand Down Expand Up @@ -29,7 +29,7 @@ export async function POST(req: Request, { params }: { params: { id: string } })
)
}

const model = xai("grok-2-1212")
const model = getLLMModel()

// Generate revised content based on analysis
const { text: revisedContent } = await generateText({
Expand Down
4 changes: 2 additions & 2 deletions apps/mongostory/app/api/content/[id]/translate/route.ts
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import { NextResponse } from "next/server"
import { generateText } from "ai"
import { xai } from "@ai-sdk/xai"
import clientPromise from "@/lib/mongodb"
import { getLLMModel } from "@/lib/llm-provider"
import { ObjectId } from "mongodb"

const SUPPORTED_LANGUAGES = {
Expand Down Expand Up @@ -35,7 +35,7 @@ export async function POST(req: Request, { params }: { params: { id: string } })
return NextResponse.json({ message: "Translation already exists" }, { status: 200 })
}

const model = xai("grok-2-1212")
const model = getLLMModel()

// Translate title
const { text: translatedTitle } = await generateText({
Expand Down
4 changes: 2 additions & 2 deletions apps/mongostory/app/api/generate-content/route.ts
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import { generateText } from "ai"
import { xai } from "@ai-sdk/xai"
import { NextResponse } from "next/server"
import { getLLMModel } from "@/lib/llm-provider"

type ExpertiseLevel = "student" | "mid-level" | "expert"

Expand Down Expand Up @@ -40,7 +40,7 @@ const topicSuggestions = {
export async function POST(req: Request) {
try {
const { topic, expertiseLevel } = await req.json()
const model = xai("grok-2-1212")
const model = getLLMModel()

// Generate title with a specific prompt for concise titles
const { text: titleResponse } = await generateText({
Expand Down
12 changes: 6 additions & 6 deletions apps/mongostory/app/api/generate/route.ts
Original file line number Diff line number Diff line change
@@ -1,38 +1,38 @@
import { xai } from "@ai-sdk/xai"
import { generateText } from "ai"
import { NextResponse } from "next/server"
import { getLLMModel } from "@/lib/llm-provider"

export async function POST(req: Request) {
try {
const { content } = await req.json()

// Generate content summary
const { text: summary } = await generateText({
model: xai("grok-2-1212"),
model: getLLMModel(),
prompt: `Summarize the following content in 2-3 sentences: ${content}`,
})

// Generate SEO optimized title
const { text: seoTitle } = await generateText({
model: xai("grok-2-1212"),
model: getLLMModel(),
prompt: `Generate an SEO-optimized title for this content: ${content}`,
})

// Generate SEO description
const { text: seoDescription } = await generateText({
model: xai("grok-2-1212"),
model: getLLMModel(),
prompt: `Write a compelling meta description (under 160 characters) for this content: ${content}`,
})

// Analyze sentiment
const { text: sentiment } = await generateText({
model: xai("grok-2-1212"),
model: getLLMModel(),
prompt: `Analyze the sentiment and emotional tone of this content. Include percentage breakdowns of detected emotions: ${content}`,
})

// Generate tag recommendations
const { text: tagSuggestions } = await generateText({
model: xai("grok-2-1212"),
model: getLLMModel(),
prompt: `Suggest 5-7 relevant tags for this content, separated by commas: ${content}`,
})

Expand Down
4 changes: 2 additions & 2 deletions apps/mongostory/lib/ai-agent.ts
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
import { xai } from "@ai-sdk/xai"
import { generateText, generateObject } from "ai"
import { z } from "zod"
import clientPromise from "@/lib/mongodb"
import { getLLMModel } from "@/lib/llm-provider"

// Helper function to clamp number within range
const clamp = (num: number, min: number, max: number) => Math.min(Math.max(num, min), max)
Expand Down Expand Up @@ -68,7 +68,7 @@ async function getEnabledFeatures() {
}

export async function analyzeContent(content: string, title: string, selectedFeatures: string[]) {
const model = xai("grok-2-1212")
const model = getLLMModel()
const enabledFeatures = await getEnabledFeatures()
const analysisPromises = []

Expand Down
Loading