'use client';
import * as React from 'react';
import { Plate, usePlateEditor } from 'platejs/react';
import { EditorKit } from '@/components/editor/editor-kit';
import { Editor, EditorContainer } from '@/components/ui/editor';
import { DEMO_VALUES } from './values/demo-values';
export default function Demo({ id }: { id: string }) {
const editor = usePlateEditor({
plugins: EditorKit,
value: DEMO_VALUES[id],
});
return (
<Plate editor={editor}>
<EditorContainer variant="demo">
<Editor />
</EditorContainer>
</Plate>
);
}
Features
- Intelligent Command Menu: Combobox interface with predefined AI commands for generation and editing
- Multiple Trigger Modes:
  - Cursor Mode: Trigger at block end with space
  - Selection Mode: Trigger with selected text
  - Block Selection Mode: Trigger with selected blocks
- Response Modes:
  - Chat Mode: Preview responses with accept/reject options
  - Insert Mode: Direct content insertion with markdown streaming
- Smart Content Processing: Optimized chunking for tables, code blocks, and complex structures
- Streaming Responses: Real-time AI content generation with support for:
  - Table Streaming: Seamless streaming into table cells
  - Column Streaming: Direct streaming into column layouts
  - MDX Tag Handling: Proper preservation of custom MDX elements during streaming
- Markdown Integration: Full support for Markdown syntax in AI responses
- Customizable Prompts: Template system for user and system prompts
- Built-in Vercel AI SDK Support: Ready-to-use chat API integration
Kit Usage
Installation
The fastest way to add AI functionality is with the AIKit, which includes the pre-configured AIPlugin and AIChatPlugin along with cursor overlay support, markdown support, and their Plate UI components.
'use client';
import type { AIChatPluginConfig } from '@platejs/ai/react';
import { withAIBatch } from '@platejs/ai';
import {
AIChatPlugin,
AIPlugin,
streamInsertChunk,
useChatChunk,
} from '@platejs/ai/react';
import { getPluginType, KEYS, PathApi } from 'platejs';
import { usePluginOption } from 'platejs/react';
import { AILoadingBar, AIMenu } from '@/components/ui/ai-menu';
import { AIAnchorElement, AILeaf } from '@/components/ui/ai-node';
import { useChat } from '../use-chat';
import { CursorOverlayKit } from './cursor-overlay-kit';
import { MarkdownKit } from './markdown-kit';
export const aiChatPlugin = AIChatPlugin.extend({
options: {
chatOptions: {
api: '/api/ai/command',
body: {},
},
},
render: {
afterContainer: AILoadingBar,
afterEditable: AIMenu,
node: AIAnchorElement,
},
shortcuts: { show: { keys: 'mod+j' } },
useHooks: ({ editor, getOption }) => {
useChat();
const mode = usePluginOption(
{ key: KEYS.aiChat } as AIChatPluginConfig,
'mode'
);
useChatChunk({
onChunk: ({ chunk, isFirst, nodes }) => {
if (isFirst && mode === 'insert') {
editor.tf.withoutSaving(() => {
editor.tf.insertNodes(
{
children: [{ text: '' }],
type: getPluginType(editor, KEYS.aiChat),
},
{
at: PathApi.next(editor.selection!.focus.path.slice(0, 1)),
}
);
});
editor.setOption(AIChatPlugin, 'streaming', true);
}
if (mode === 'insert' && nodes.length > 0) {
withAIBatch(
editor,
() => {
if (!getOption('streaming')) return;
editor.tf.withScrolling(() => {
streamInsertChunk(editor, chunk, {
textProps: {
[getPluginType(editor, KEYS.ai)]: true,
},
});
});
},
{ split: isFirst }
);
}
},
onFinish: () => {
editor.setOption(AIChatPlugin, 'streaming', false);
editor.setOption(AIChatPlugin, '_blockChunks', '');
editor.setOption(AIChatPlugin, '_blockPath', null);
editor.setOption(AIChatPlugin, '_mdxName', null);
},
});
},
});
export const AIKit = [
...CursorOverlayKit,
...MarkdownKit,
AIPlugin.withComponent(AILeaf),
aiChatPlugin,
];
- AIMenu: Renders the AI command interface
- AILoadingBar: Shows AI processing status
- AIAnchorElement: Anchor element for the AI menu
- AILeaf: Renders AI-generated content with visual distinction
Add Kit
import { createPlateEditor } from 'platejs/react';
import { AIKit } from '@/components/editor/plugins/ai-kit';
const editor = createPlateEditor({
plugins: [
// ...otherPlugins,
...AIKit,
],
});
Add API Route
AI functionality requires a server-side API endpoint. Add the pre-configured AI command route:
import type {
ChatMessage,
ToolName,
} from '@/components/editor/use-chat';
import type { NextRequest } from 'next/server';
import { google } from '@ai-sdk/google';
import { createOpenAI } from '@ai-sdk/openai';
import { replacePlaceholders } from '@platejs/ai';
import {
convertToModelMessages,
createUIMessageStream,
createUIMessageStreamResponse,
generateObject,
streamObject,
streamText,
} from 'ai';
import { NextResponse } from 'next/server';
import { type SlateEditor, createSlateEditor, nanoid, RangeApi } from 'platejs';
import { z } from 'zod';
import { BaseEditorKit } from '@/components/editor/editor-base-kit';
import { markdownJoinerTransform } from '@/lib/markdown-joiner-transform';
export async function POST(req: NextRequest) {
const { apiKey: key, ctx, messages: messagesRaw } = await req.json();
const { children, selection, toolName: toolNameParam } = ctx;
const editor = createSlateEditor({
plugins: BaseEditorKit,
selection,
value: children,
});
const apiKey = key || process.env.OPENAI_API_KEY;
if (!apiKey) {
return NextResponse.json(
{ error: 'Missing OpenAI API key.' },
{ status: 401 }
);
}
const openai = createOpenAI({ apiKey });
const isSelecting = editor.api.isExpanded();
const isBlockSelecting = isSelectingAllBlocks(editor);
try {
const stream = createUIMessageStream<ChatMessage>({
execute: async ({ writer }) => {
const lastIndex = messagesRaw.findLastIndex(
(message: any) => message.role === 'user'
);
const messages = [...messagesRaw];
messages[lastIndex] = replaceMessagePlaceholders(
editor,
messages[lastIndex],
{
isSelecting,
}
);
const lastUserMessage = messages[lastIndex];
let toolName = toolNameParam;
if (!toolName) {
const { object: AIToolName } = await generateObject({
enum: ['generate', 'edit', 'comment'],
model: google('gemini-2.5-flash'),
output: 'enum',
prompt: `User message:
${JSON.stringify(lastUserMessage)}`,
system: chooseToolSystem,
});
writer.write({
data: AIToolName as ToolName,
type: 'data-toolName',
});
toolName = AIToolName;
}
if (toolName === 'generate') {
const generateSystem = replacePlaceholders(
editor,
systemTemplate({ isBlockSelecting, isSelecting })
);
const gen = streamText({
experimental_transform: markdownJoinerTransform(),
maxOutputTokens: 2048,
messages: convertToModelMessages(messages),
model: google('gemini-2.5-flash'),
system: generateSystem,
});
writer.merge(gen.toUIMessageStream({ sendFinish: false }));
}
if (toolName === 'edit') {
const editSystem = replacePlaceholders(
editor,
systemTemplate({ isBlockSelecting, isSelecting })
);
const edit = streamText({
experimental_transform: markdownJoinerTransform(),
maxOutputTokens: 2048,
messages: convertToModelMessages(messages),
model: google('gemini-2.5-flash'),
system: editSystem,
});
writer.merge(edit.toUIMessageStream({ sendFinish: false }));
}
if (toolName === 'comment') {
const lastUserMessage = messagesRaw[lastIndex] as ChatMessage;
const prompt = lastUserMessage.parts.find(
(p) => p.type === 'text'
)?.text;
const commentPrompt = replacePlaceholders(
editor,
commentTemplate({ isSelecting }),
{
prompt,
}
);
const { elementStream } = streamObject({
maxOutputTokens: 2048,
model: openai('gpt-4o'),
output: 'array',
prompt: commentPrompt,
schema: z
.object({
blockId: z
.string()
.describe(
'The id of the starting block. If the comment spans multiple blocks, use the id of the first block.'
),
comment: z
.string()
.describe(
'A brief comment or explanation for this fragment.'
),
content: z
.string()
.describe(
String.raw`The original document fragment to be commented on. It can be the entire block, a small part within a block, or span multiple blocks. If spanning multiple blocks, separate them with two \n\n.`
),
})
.describe('A single comment'),
system: commentSystem,
});
// Create a single message ID for the entire comment stream
for await (const comment of elementStream) {
const commentDataId = nanoid();
// Send each comment as a delta
writer.write({
id: commentDataId,
data: comment,
type: 'data-comment',
});
}
return;
}
},
});
return createUIMessageStreamResponse({ stream });
} catch {
return NextResponse.json(
{ error: 'Failed to process AI request' },
{ status: 500 }
);
}
}
const systemTemplate = ({
isBlockSelecting,
isSelecting,
}: {
isBlockSelecting: boolean;
isSelecting: boolean;
}) => {
return isBlockSelecting
? PROMPT_TEMPLATES.systemBlockSelecting
: isSelecting
? PROMPT_TEMPLATES.systemSelecting
: PROMPT_TEMPLATES.systemDefault;
};
const promptTemplate = ({ isSelecting }: { isSelecting: boolean }) => {
return isSelecting
? PROMPT_TEMPLATES.userSelecting
: PROMPT_TEMPLATES.userDefault;
};
const commentTemplate = ({ isSelecting }: { isSelecting: boolean }) => {
return isSelecting
? PROMPT_TEMPLATES.commentSelecting
: PROMPT_TEMPLATES.commentDefault;
};
const chooseToolSystem = `You are a strict classifier. Classify the user's last request as "generate", "edit", or "comment".
Priority rules:
1. Default is "generate". Any open question, idea request, or creation request → "generate".
2. Only return "edit" if the user provides original text (or a selection of text) AND asks to change, rephrase, translate, or shorten it.
3. Only return "comment" if the user explicitly asks for comments, feedback, annotations, or review. Do not infer "comment" implicitly.
Return only one enum value with no explanation.`;
const commentSystem = `You are a document review assistant.
You will receive an MDX document wrapped in <block id="..."> content </block> tags.
Your task:
- Read the content of all blocks and provide comments.
- For each comment, generate a JSON object:
- blockId: the id of the block being commented on.
- content: the original document fragment that needs commenting.
- comment: a brief comment or explanation for that fragment.
Rules:
- IMPORTANT: If a comment spans multiple blocks, use the id of the **first** block.
- The **content** field must be the original content inside the block tag. The returned content must not include the block tags, but should retain other MDX tags.
- IMPORTANT: The **content** field must be flexible:
- It can cover one full block, only part of a block, or multiple blocks.
- If multiple blocks are included, separate them with two \\n\\n.
- Do NOT default to using the entire block—use the smallest relevant span instead.
- At least one comment must be provided.
`;
const systemCommon = `\
You are an advanced AI-powered note-taking assistant, designed to enhance productivity and creativity in note management.
Respond directly to user prompts with clear, concise, and relevant content. Maintain a neutral, helpful tone.
Rules:
- <Document> is the entire note the user is working on.
- <Reminder> is a reminder of how you should reply to INSTRUCTIONS. It does not apply to questions.
- Anything else is the user prompt.
- Your response should be tailored to the user's prompt, providing precise assistance to optimize note management.
- For INSTRUCTIONS: Follow the <Reminder> exactly. Provide ONLY the content to be inserted or replaced. No explanations or comments.
- For QUESTIONS: Provide a helpful and concise answer. You may include brief explanations if necessary.
- CRITICAL: DO NOT remove or modify the following custom MDX tags: <u>, <callout>, <kbd>, <toc>, <sub>, <sup>, <mark>, <del>, <date>, <span>, <column>, <column_group>, <file>, <audio>, <video> in <Selection> unless the user explicitly requests this change.
- CRITICAL: Distinguish between INSTRUCTIONS and QUESTIONS. Instructions typically ask you to modify or add content. Questions ask for information or clarification.
- CRITICAL: when asked to write in markdown, do not start with \`\`\`markdown.
- CRITICAL: When writing columns, line breaks and indentation such as the following must be preserved.
<column_group>
<column>
1
</column>
<column>
2
</column>
<column>
3
</column>
</column_group>
`;
const systemDefault = `\
${systemCommon}
- <Block> is the current block of text the user is working on.
- Ensure your output can seamlessly fit into the existing <Block> structure.
<Block>
{block}
</Block>
`;
const systemSelecting = `\
${systemCommon}
- <Block> is the block of text containing the user's selection, providing context.
- Ensure your output can seamlessly fit into the existing <Block> structure.
- <Selection> is the specific text the user has selected in the block and wants to modify or ask about.
- Consider the context provided by <Block>, but only modify <Selection>. Your response should be a direct replacement for <Selection>.
<Block>
{block}
</Block>
<Selection>
{selection}
</Selection>
`;
const systemBlockSelecting = `\
${systemCommon}
- <Selection> represents the full blocks of text the user has selected and wants to modify or ask about.
- Your response should be a direct replacement for the entire <Selection>.
- Maintain the overall structure and formatting of the selected blocks, unless explicitly instructed otherwise.
- CRITICAL: Provide only the content to replace <Selection>. Do not add additional blocks or change the block structure unless specifically requested.
<Selection>
{block}
</Selection>
`;
const userDefault = `<Reminder>
CRITICAL: NEVER write <Block>.
</Reminder>
{prompt}`;
const userSelecting = `<Reminder>
If this is a question, provide a helpful and concise answer about <Selection>.
If this is an instruction, provide ONLY the text to replace <Selection>. No explanations.
Ensure it fits seamlessly within <Block>. If <Block> is empty, write ONE random sentence.
NEVER write <Block> or <Selection>.
</Reminder>
{prompt} about <Selection>`;
const commentSelecting = `{prompt}:
{blockWithBlockId}
`;
const commentDefault = `{prompt}:
{editorWithBlockId}
`;
const PROMPT_TEMPLATES = {
commentDefault,
commentSelecting,
systemBlockSelecting,
systemDefault,
systemSelecting,
userDefault,
userSelecting,
};
const replaceMessagePlaceholders = (
editor: SlateEditor,
message: ChatMessage,
{ isSelecting }: { isSelecting: boolean }
): ChatMessage => {
const template = promptTemplate({ isSelecting });
const parts = message.parts.map((part) => {
if (part.type !== 'text' || !part.text) return part;
const text = replacePlaceholders(editor, template, {
prompt: part.text,
});
return { ...part, text } as typeof part;
});
return { ...message, parts };
};
/** Check if the current selection fully covers all top-level blocks. */
const isSelectingAllBlocks = (editor: SlateEditor) => {
const blocksRange = editor.api.nodesRange(
editor.api.blocks({ mode: 'highest' })
);
return (
!!blocksRange &&
!!editor.selection &&
RangeApi.equals(blocksRange, editor.selection)
);
};
Configure Environment
Ensure your OpenAI API key is set in your environment variables:
OPENAI_API_KEY="your-api-key"
Manual Usage
Installation
pnpm add @platejs/ai @platejs/selection @platejs/markdown @platejs/basic-nodes
Add Plugins
import { AIPlugin, AIChatPlugin } from '@platejs/ai/react';
import { createPlateEditor } from 'platejs/react';
import { MarkdownKit } from '@/components/editor/plugins/markdown-kit';
const editor = createPlateEditor({
plugins: [
// ...otherPlugins,
...MarkdownKit, // Required for AI content processing
AIPlugin,
AIChatPlugin,
],
});
- MarkdownKit: Required for processing AI responses with Markdown syntax and MDX support.
- AIPlugin: Core plugin for AI content management and transforms.
- AIChatPlugin: Handles AI chat interface, streaming, and user interactions.
Configure Plugins
Create the extended aiChatPlugin with basic configuration:
import type { AIChatPluginConfig } from '@platejs/ai/react';
import type { UseChatOptions } from 'ai/react';
import { KEYS, PathApi } from 'platejs';
import { streamInsertChunk, withAIBatch } from '@platejs/ai';
import { AIChatPlugin, AIPlugin, useChatChunk } from '@platejs/ai/react';
import { usePluginOption } from 'platejs/react';
import { MarkdownKit } from '@/components/editor/plugins/markdown-kit';
import { AILoadingBar, AIMenu } from '@/components/ui/ai-menu';
import { AIAnchorElement, AILeaf } from '@/components/ui/ai-node';
export const aiChatPlugin = AIChatPlugin.extend({
options: {
chatOptions: {
api: '/api/ai/command',
body: {},
} as UseChatOptions,
},
render: {
afterContainer: AILoadingBar,
afterEditable: AIMenu,
node: AIAnchorElement,
},
shortcuts: { show: { keys: 'mod+j' } },
});
const plugins = [
// ...otherPlugins,
...MarkdownKit,
AIPlugin.withComponent(AILeaf),
aiChatPlugin,
];
- chatOptions: Configuration for the Vercel AI SDK useChat hook.
- render: UI components for the AI interface.
- shortcuts: Keyboard shortcuts (Cmd + J to show the AI menu).
Add Streaming with useHooks
The useChatChunk hook processes streaming AI responses in real-time, handling content insertion and chunk management. It monitors the chat state and processes incoming text chunks, inserting them into the editor as they arrive:
export const aiChatPlugin = AIChatPlugin.extend({
// ... previous options
useHooks: ({ editor, getOption }) => {
const mode = usePluginOption(
{ key: KEYS.aiChat } as AIChatPluginConfig,
'mode'
);
useChatChunk({
onChunk: ({ chunk, isFirst, nodes }) => {
if (isFirst && mode === 'insert') {
editor.tf.withoutSaving(() => {
editor.tf.insertNodes(
{
children: [{ text: '' }],
type: KEYS.aiChat,
},
{
at: PathApi.next(editor.selection!.focus.path.slice(0, 1)),
}
);
});
editor.setOption(AIChatPlugin, 'streaming', true);
}
if (mode === 'insert' && nodes.length > 0) {
withAIBatch(
editor,
() => {
if (!getOption('streaming')) return;
editor.tf.withScrolling(() => {
streamInsertChunk(editor, chunk, {
textProps: {
ai: true,
},
});
});
},
{ split: isFirst }
);
}
},
onFinish: () => {
editor.setOption(AIChatPlugin, 'streaming', false);
editor.setOption(AIChatPlugin, '_blockChunks', '');
editor.setOption(AIChatPlugin, '_blockPath', null);
},
});
},
});
- onChunk: Handles each streaming chunk, creating AI nodes on the first chunk and inserting content in real-time.
- onFinish: Cleans up streaming state when the response completes.
- Uses withAIBatch and streamInsertChunk for optimized content insertion.
System Prompt
The system prompt defines the AI's role and behavior. You can customize the systemTemplate in your extended plugin:
export const customAIChatPlugin = AIChatPlugin.extend({
options: {
systemTemplate: ({ isBlockSelecting, isSelecting }) => {
const customSystem = `You are a technical documentation assistant specialized in code and API documentation.
Rules:
- Provide accurate, well-structured technical content
- Use appropriate code formatting and syntax highlighting
- Include relevant examples and best practices
- Maintain consistent documentation style
- CRITICAL: DO NOT remove or modify custom MDX tags unless explicitly requested.
- CRITICAL: Distinguish between INSTRUCTIONS and QUESTIONS.`;
return isBlockSelecting
? `${customSystem}
- <Selection> represents the full blocks of text the user has selected and wants to modify or ask about.
- Your response should be a direct replacement for the entire <Selection>.
- Maintain the overall structure and formatting of the selected blocks, unless explicitly instructed otherwise.
<Selection>
{block}
</Selection>`
: isSelecting
? `${customSystem}
- <Block> is the block of text containing the user's selection, providing context.
- <Selection> is the specific text the user has selected in the block and wants to modify or ask about.
- Consider the context provided by <Block>, but only modify <Selection>.
<Block>
{block}
</Block>
<Selection>
{selection}
</Selection>`
: `${customSystem}
- <Block> is the current block of text the user is working on.
<Block>
{block}
</Block>`;
},
// ...other options
},
});
User Prompt
Customize how user prompts are formatted and contextualized in your extended plugin:
export const customAIChatPlugin = AIChatPlugin.extend({
options: {
promptTemplate: ({ isBlockSelecting, isSelecting }) => {
return isBlockSelecting
? `<Reminder>
If this is a question, provide a helpful and concise answer about <Selection>.
If this is an instruction, provide ONLY the content to replace the entire <Selection>. No explanations.
Analyze and improve the following content blocks maintaining structure and clarity.
NEVER write <Block> or <Selection>.
</Reminder>
{prompt} about <Selection>`
: isSelecting
? `<Reminder>
If this is a question, provide a helpful and concise answer about <Selection>.
If this is an instruction, provide ONLY the text to replace <Selection>. No explanations.
Ensure it fits seamlessly within <Block>. If <Block> is empty, write ONE random sentence.
NEVER write <Block> or <Selection>.
</Reminder>
{prompt} about <Selection>`
: `<Reminder>
CRITICAL: NEVER write <Block>.
Continue or improve the content naturally.
</Reminder>
{prompt}`;
},
// ...other options
},
});
Add API Route
Create an API route handler with optimized streaming for different content types:
import type { NextRequest } from 'next/server';
import { createOpenAI } from '@ai-sdk/openai';
import { convertToCoreMessages, streamText } from 'ai';
import { NextResponse } from 'next/server';
import { markdownJoinerTransform } from '@/lib/markdown-joiner-transform';
export async function POST(req: NextRequest) {
const { apiKey: key, messages, system } = await req.json();
const apiKey = key || process.env.OPENAI_API_KEY;
if (!apiKey) {
return NextResponse.json(
{ error: 'Missing OpenAI API key.' },
{ status: 401 }
);
}
const openai = createOpenAI({ apiKey });
try {
const result = streamText({
experimental_transform: markdownJoinerTransform(),
maxTokens: 2048,
messages: convertToCoreMessages(messages),
model: openai('gpt-4o'),
system: system,
});
return result.toDataStreamResponse();
} catch {
return NextResponse.json(
{ error: 'Failed to process AI request' },
{ status: 500 }
);
}
}
Then, set your OPENAI_API_KEY in .env.local.
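For example, in .env.local (same variable as in the kit setup above):
OPENAI_API_KEY="your-api-key"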
Add Toolbar Button
You can add AIToolbarButton to your Toolbar to open the AI menu.
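A minimal sketch, assuming the AIToolbarButton and FixedToolbar components from the Plate UI registry live under @/components/ui (adjust the paths and props to your setup):
'use client';
import { WandSparklesIcon } from 'lucide-react';
// Assumed registry paths; adjust to wherever your Plate UI components live.
import { AIToolbarButton } from '@/components/ui/ai-toolbar-button';
import { FixedToolbar } from '@/components/ui/fixed-toolbar';
export function MyFixedToolbar() {
  return (
    <FixedToolbar>
      {/* Opens the AI menu for the current cursor position or selection */}
      <AIToolbarButton tooltip="AI commands">
        <WandSparklesIcon />
      </AIToolbarButton>
    </FixedToolbar>
  );
}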
Keyboard Shortcuts
| Key | Description |
| --- | --- |
| Space | Open AI menu in empty block (cursor mode) |
| Cmd + J | Open AI menu (cursor or selection mode) |
| Escape | Close AI menu |
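The Cmd + J binding comes from the shortcuts option shown in the plugin configuration above. If you prefer a different key, you can remap it when extending the plugin; a minimal sketch:
import { AIChatPlugin } from '@platejs/ai/react';
export const aiChatPlugin = AIChatPlugin.extend({
  // ...options and render config from the previous sections
  // Open the AI menu with Cmd+K instead of the default Cmd+J.
  shortcuts: { show: { keys: 'mod+k' } },
});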
Streaming Example
'use client';
import {
type HTMLAttributes,
useCallback,
useReducer,
useRef,
useState,
} from 'react';
import { AIChatPlugin, streamInsertChunk } from '@platejs/ai/react';
import { deserializeMd } from '@platejs/markdown';
import {
ChevronFirstIcon,
ChevronLastIcon,
PauseIcon,
PlayIcon,
RotateCcwIcon,
} from 'lucide-react';
import { getPluginType, KEYS } from 'platejs';
import { Plate, usePlateEditor, usePlateViewEditor } from 'platejs/react';
import { Button } from '@/components/ui/button';
import { cn } from '@/lib/utils';
import { EditorKit } from '@/components/editor/editor-kit';
import { CopilotKit } from '@/components/editor/plugins/copilot-kit';
import { MarkdownJoiner } from '@/lib/markdown-joiner-transform';
import { Editor, EditorContainer, EditorView } from '@/components/ui/editor';
import { BaseEditorKit } from '../components/editor/editor-base-kit';
const testScenarios = {
// Basic markdown with complete elements
columns: [
'paragraph\n\n<column',
'_group',
'>\n',
' ',
' <',
'column',
' width',
'="',
'33',
'.',
'333',
'333',
'333',
'333',
'336',
'%">\n',
' ',
' ',
'1',
'\n',
' ',
' </',
'column',
'>\n',
' ',
' <',
'column',
' width',
'="',
'33',
'.',
'333',
'333',
'333',
'333',
'336',
'%">\n',
' ',
' ',
'2',
'\n',
' ',
' </',
'column',
'>\n',
' ',
' <',
'column',
' width',
'="',
'33',
'.',
'333',
'333',
'333',
'333',
'336',
'%">\n',
' ',
' ',
'3',
'\n',
' ',
' </',
'column',
'>\n',
'</',
'column',
'_group',
'>\n\nparagraph',
],
links: [
'[Link ',
'to OpenA',
'I](https://www.openai.com)\n\n',
'[Link ',
'to Google',
'I](https://ww',
'w.google.com/1',
'11',
'22',
'xx',
'yy',
'zz',
'aa',
'bb',
'cc',
'dd',
'ee',
'33)\n\n',
'[False Positive',
'11',
'22',
'33',
'44',
'55',
'66',
'77',
'88',
'99',
'100',
],
lists: ['1.', ' number 1\n', '- ', 'List B\n', '-', ' [x] ', 'Task C'],
listWithImage: [
'## ',
'Links ',
'and ',
'Images\n\n',
'- [Link ',
'to OpenA',
'I](https://www.openai.com)\n',
'- \n\n',
],
nestedStructureBlock: [
'```',
'javascript',
'\n',
'import',
' React',
' from',
" '",
'react',
"';\n",
'import',
' {',
' Plate',
' }',
' from',
" '@",
'ud',
'ecode',
'/',
'plate',
"';\n\n",
'const',
' Basic',
'Editor',
' =',
' ()',
' =>',
' {\n',
' ',
' return',
' (\n',
' ',
' <',
'Plate',
'>\n',
' ',
' {/*',
' Add',
' your',
' plugins',
' and',
' components',
' here',
' */}\n',
' ',
' </',
'Plate',
'>\n',
' ',
' );\n',
'};\n\n',
'export',
' default',
' Basic',
'Editor',
';\n',
'```',
],
table: [
'| Feature |',
' Plate',
'.js',
' ',
' ',
'| Slate.js ',
' ',
'|\n|------------------',
'|--------------------------------',
'---------------',
'|--------------------------------',
'---------------',
'|\n| Purpose ',
' ',
'| Rich text editor framework',
' ',
' ',
'| Rich text editor framework',
' ',
' ',
'|\n| Flexibility ',
' ',
'| Highly customizable',
' with',
' plugins',
' ',
' ',
'| Highly customizable',
' with',
' plugins',
' ',
' ',
'|\n| Community ',
' ',
'| Growing community support',
' ',
' ',
'| Established community',
' support',
' ',
' ',
'|\n| Documentation ',
' ',
'| Comprehensive documentation',
' available',
' ',
' ',
'| Comprehensive documentation',
' available',
' ',
' ',
'|\n| Performance ',
' ',
'| Optimized for performance',
' with',
' large',
' documents',
'| Good performance, but',
' may',
' require',
' optimization',
'|\n| Integration ',
' ',
'| Easy integration with',
' React',
' ',
' ',
'| Easy integration with',
' React',
' ',
' ',
'|\n| Use Cases ',
' ',
'| Suitable for complex',
' editing',
' needs',
' ',
' ',
'| Suitable for complex',
' editing',
' needs',
' ',
' ',
'\n\n',
'Paragraph ',
'should ',
'exist ',
'from ',
'table',
],
};
export default function MarkdownStreamingDemo() {
const [selectedScenario, setSelectedScenario] =
useState<keyof typeof testScenarios>('columns');
const [activeIndex, setActiveIndex] = useState<number>(0);
const isPauseRef = useRef(false);
const streamSessionRef = useRef(0);
const [, forceUpdate] = useReducer((x) => x + 1, 0);
const [streaming, setStreaming] = useState(false);
const [isPlateStatic, setIsPlateStatic] = useState(false);
const [speed, setSpeed] = useState<number | null>(null);
const editor = usePlateEditor(
{
plugins: [...CopilotKit, ...EditorKit],
value: [],
},
[]
);
const editorStatic = usePlateViewEditor(
{
plugins: BaseEditorKit,
},
[]
);
const currentChunks = testScenarios[selectedScenario];
const transformedCurrentChunks = transformedChunks(currentChunks);
const onStreaming = useCallback(async () => {
setStreaming(true);
streamSessionRef.current += 1;
const sessionId = streamSessionRef.current;
isPauseRef.current = false;
setActiveIndex(0);
// editor.tf.setValue([]);
editor.setOption(AIChatPlugin, 'streaming', false);
editor.setOption(AIChatPlugin, '_blockChunks', '');
editor.setOption(AIChatPlugin, '_blockPath', null);
for (let i = 0; i < transformedCurrentChunks.length; i++) {
while (isPauseRef.current) {
if (sessionId !== streamSessionRef.current) return;
await new Promise((resolve) => setTimeout(resolve, 100));
}
if (sessionId !== streamSessionRef.current) return;
setActiveIndex(i + 1);
const chunk = transformedCurrentChunks[i];
streamInsertChunk(editor, chunk.chunk, {
textProps: {
[getPluginType(editor, KEYS.ai)]: true,
},
});
await new Promise((resolve) =>
setTimeout(resolve, speed ?? chunk.delayInMs)
);
if (sessionId !== streamSessionRef.current) return;
}
setStreaming(false);
}, [editor, transformedCurrentChunks, speed]);
const onStreamingStatic = useCallback(async () => {
let output = '';
setStreaming(true);
streamSessionRef.current += 1;
for (const chunk of transformedCurrentChunks) {
output += chunk.chunk;
editorStatic.children = deserializeMd(editorStatic, output);
setActiveIndex((prev) => prev + 1);
forceUpdate();
await new Promise((resolve) =>
setTimeout(resolve, speed ?? chunk.delayInMs)
);
}
setStreaming(false);
}, [editorStatic, speed, transformedCurrentChunks]);
const onReset = useCallback(() => {
setActiveIndex(0);
if (isPlateStatic) {
editorStatic.children = [];
forceUpdate();
} else {
editor.tf.setValue([]);
editor.setOption(AIChatPlugin, 'streaming', false);
editor.setOption(AIChatPlugin, '_blockChunks', '');
editor.setOption(AIChatPlugin, '_blockPath', null);
}
}, [editor, editorStatic, isPlateStatic]);
const onNavigate = useCallback(
(targetIndex: number) => {
// Check if navigation is possible
if (targetIndex < 0 || targetIndex > transformedCurrentChunks.length)
return;
if (isPlateStatic) {
let output = '';
for (const chunk of transformedCurrentChunks.slice(0, targetIndex)) {
output += chunk.chunk;
}
editorStatic.children = deserializeMd(editorStatic, output);
setActiveIndex(targetIndex);
forceUpdate();
} else {
editor.tf.setValue([]);
editor.setOption(AIChatPlugin, 'streaming', false);
editor.setOption(AIChatPlugin, '_blockChunks', '');
editor.setOption(AIChatPlugin, '_blockPath', null);
for (const chunk of transformedCurrentChunks.slice(0, targetIndex)) {
streamInsertChunk(editor, chunk.chunk, {
textProps: {
[getPluginType(editor, KEYS.ai)]: true,
},
});
}
setActiveIndex(targetIndex);
}
},
[editor, editorStatic, isPlateStatic, transformedCurrentChunks]
);
const onPrev = useCallback(
() => onNavigate(activeIndex - 1),
[onNavigate, activeIndex]
);
const onNext = useCallback(
() => onNavigate(activeIndex + 1),
[onNavigate, activeIndex]
);
return (
<section className="h-full overflow-y-auto p-20">
<div className="mb-10 rounded bg-gray-100 p-4">
{/* Scenario Selection */}
<div className="mb-4">
<span className="mb-2 block text-sm font-medium">Test Scenario:</span>
<select
className="w-64 rounded border px-3 py-2"
value={selectedScenario}
onChange={(e) => {
setSelectedScenario(e.target.value as keyof typeof testScenarios);
setActiveIndex(0);
editor.tf.setValue([]);
}}
>
{Object.entries(testScenarios).map(([key]) => (
<option key={key} value={key}>
{key
.replace(/([A-Z])/g, ' $1')
.replace(/^./, (str) => str.toUpperCase())}
</option>
))}
</select>
</div>
{/* Control Buttons */}
<div className="mb-4 flex items-center gap-2">
<Button onClick={onPrev}>
<ChevronFirstIcon />
</Button>
<Button
onClick={() => {
if (streaming) {
isPauseRef.current = !isPauseRef.current;
forceUpdate();
} else {
if (isPlateStatic) {
onStreamingStatic();
} else {
onStreaming();
}
}
}}
>
{isPauseRef.current || !streaming ? <PlayIcon /> : <PauseIcon />}
</Button>
<Button onClick={onNext}>
<ChevronLastIcon />
</Button>
<Button onClick={() => onReset()}>
<RotateCcwIcon />
</Button>
<Button
onClick={() => {
setIsPlateStatic(!isPlateStatic);
onReset();
}}
>
Switch to {isPlateStatic ? 'Plate' : 'PlateStatic'}
</Button>
</div>
<div className="mb-4 flex items-center gap-2">
<span className="block text-sm font-medium">Speed:</span>
<select
className="rounded border px-2 py-1"
value={speed ?? 'default'}
onChange={(e) =>
setSpeed(
e.target.value === 'default' ? null : Number(e.target.value)
)
}
>
{['default', 10, 100, 200].map((ms) => (
<option key={ms} value={ms}>
{ms === 'default'
? 'Default'
: ms === 10
? 'Fast(10ms)'
: ms === 100
? 'Medium(100ms)'
: ms === 200
? 'Slow(200ms)'
: `${ms}ms`}
</option>
))}
</select>
<span className="text-sm text-muted-foreground">
The default speed is 10ms, but it adjusts to 100ms when streaming a
table or code block.
</span>
</div>
<div className="my-4 h-2 w-full rounded bg-gray-200">
<div
className="h-2 rounded bg-primary transition-all duration-300"
style={{
width: `${(activeIndex / (transformedCurrentChunks.length || 1)) * 100}%`,
}}
/>
</div>
<span className="text-sm text-muted-foreground">
PlateStatic offers more robust and flawless performance.
</span>
</div>
<div className="my-2 flex gap-10">
<div className="w-1/2">
<h3 className="mb-2 font-semibold">
Transformed Chunks ({activeIndex}/{transformedCurrentChunks.length})
</h3>
<Tokens
activeIndex={activeIndex}
chunkClick={onNavigate}
chunks={splitChunksByLinebreak(
transformedCurrentChunks.map((c) => c.chunk)
)}
/>
</div>
<div className="w-1/2">
<h3 className="mb-2 font-semibold">Editor Output</h3>
{isPlateStatic ? (
<EditorView
className="h-[500px] overflow-y-auto rounded border"
editor={editorStatic}
/>
) : (
<>
<Plate editor={editor}>
<EditorContainer className="h-[500px] overflow-y-auto rounded border">
<Editor
variant="demo"
className="pb-[20vh]"
placeholder="Type something..."
spellCheck={false}
/>
</EditorContainer>
</Plate>
</>
)}
</div>
</div>
<h2 className="mt-8 mb-4 text-xl font-semibold">Raw Token Comparison</h2>
<div className="my-2 flex gap-10">
<div className="w-1/2">
<h3 className="mb-2 font-semibold">Original Chunks</h3>
<Tokens
activeIndex={0}
chunks={splitChunksByLinebreak(currentChunks)}
/>
</div>
<div className="w-1/2">
<h3 className="mb-2 font-semibold">Raw Markdown Text</h3>
<textarea
className={cn(
'h-[500px] w-full overflow-y-auto rounded border p-4 font-mono text-sm'
)}
readOnly
value={currentChunks.join('')}
/>
</div>
</div>
</section>
);
}
type TChunks = {
chunks: {
index: number;
text: string;
}[];
linebreaks: number;
};
function splitChunksByLinebreak(chunks: string[]) {
const result: TChunks[] = [];
let current: { index: number; text: string }[] = [];
for (let i = 0; i < chunks.length; i++) {
const chunk = chunks[i];
current.push({ index: i, text: chunk });
const match = /(\n+)$/.exec(chunk);
if (match) {
const linebreaks = match[1].length;
result.push({
chunks: [...current],
linebreaks,
});
current = [];
}
}
if (current.length > 0) {
result.push({
chunks: [...current],
linebreaks: 0,
});
}
return result;
}
type TChunk = { chunk: string; delayInMs: number };
const transformedChunks = (chunks: string[]): TChunk[] => {
const result: TChunk[] = [];
const joiner = new MarkdownJoiner();
for (const chunk of chunks) {
const processed = joiner.processText(chunk);
if (processed) {
result.push({ chunk: processed, delayInMs: joiner.delayInMs });
}
}
// flush any remaining buffered content
const remaining = joiner.flush();
if (remaining) {
result.push({ chunk: remaining, delayInMs: joiner.delayInMs });
}
return result;
};
const Tokens = ({
activeIndex,
chunkClick,
chunks,
...props
}: {
activeIndex: number;
chunks: TChunks[];
chunkClick?: (index: number) => void;
} & HTMLAttributes<HTMLDivElement>) => {
return (
<div
className="my-1 h-[500px] overflow-y-auto rounded bg-gray-100 p-4 font-mono"
{...props}
>
{chunks.map((chunk, index) => {
return (
<div key={index} className="py-1">
{chunk.chunks.map((c, j) => {
const lineBreak = c.text.replaceAll('\n', '⤶');
const space = lineBreak.replaceAll(' ', '␣');
return (
<span
key={j}
className={cn(
'mx-1 inline-block rounded border p-1',
activeIndex && c.index < activeIndex && 'bg-amber-400'
)}
onClick={() => chunkClick && chunkClick(c.index + 1)}
>
{space}
</span>
);
})}
</div>
);
})}
</div>
);
};
Plate Plus
- Combobox menu with free-form prompt input
- Additional trigger methods:
  - Block menu button
  - Slash command menu
- Beautifully crafted UI
Customization
Adding Custom AI Commands
'use client';
import * as React from 'react';
import {
AIChatPlugin,
AIPlugin,
useEditorChat,
useLastAssistantMessage,
} from '@platejs/ai/react';
import { getTransientCommentKey } from '@platejs/comment';
import { BlockSelectionPlugin, useIsSelecting } from '@platejs/selection/react';
import { Command as CommandPrimitive } from 'cmdk';
import {
Album,
BadgeHelp,
BookOpenCheck,
Check,
CornerUpLeft,
FeatherIcon,
ListEnd,
ListMinus,
ListPlus,
Loader2Icon,
PauseIcon,
PenLine,
SmileIcon,
Wand,
X,
} from 'lucide-react';
import {
type NodeEntry,
type SlateEditor,
isHotkey,
KEYS,
NodeApi,
TextApi,
} from 'platejs';
import {
useEditorPlugin,
useFocusedLast,
useHotkeys,
usePluginOption,
} from 'platejs/react';
import { type PlateEditor, useEditorRef } from 'platejs/react';
import { Button } from '@/components/ui/button';
import {
Command,
CommandGroup,
CommandItem,
CommandList,
} from '@/components/ui/command';
import {
Popover,
PopoverAnchor,
PopoverContent,
} from '@/components/ui/popover';
import { cn } from '@/lib/utils';
import { commentPlugin } from '../components/editor/plugins/comment-kit';
import { AIChatEditor } from './ai-chat-editor';
export function AIMenu() {
const { api, editor } = useEditorPlugin(AIChatPlugin);
const mode = usePluginOption(AIChatPlugin, 'mode');
const toolName = usePluginOption(AIChatPlugin, 'toolName');
const streaming = usePluginOption(AIChatPlugin, 'streaming');
const isSelecting = useIsSelecting();
const isFocusedLast = useFocusedLast();
const open = usePluginOption(AIChatPlugin, 'open') && isFocusedLast;
const [value, setValue] = React.useState('');
const [input, setInput] = React.useState('');
const chat = usePluginOption(AIChatPlugin, 'chat');
const { messages, status } = chat;
const [anchorElement, setAnchorElement] = React.useState<HTMLElement | null>(
null
);
const content = useLastAssistantMessage()?.parts.find(
(part) => part.type === 'text'
)?.text;
React.useEffect(() => {
if (streaming) {
const anchor = api.aiChat.node({ anchor: true });
setTimeout(() => {
const anchorDom = editor.api.toDOMNode(anchor![0])!;
setAnchorElement(anchorDom);
}, 0);
}
// eslint-disable-next-line react-hooks/exhaustive-deps
}, [streaming]);
const setOpen = (open: boolean) => {
if (open) {
api.aiChat.show();
} else {
api.aiChat.hide();
}
};
const show = (anchorElement: HTMLElement) => {
setAnchorElement(anchorElement);
setOpen(true);
};
useEditorChat({
chat,
onOpenBlockSelection: (blocks: NodeEntry[]) => {
show(editor.api.toDOMNode(blocks.at(-1)![0])!);
},
onOpenChange: (open) => {
if (!open) {
setAnchorElement(null);
setInput('');
}
},
onOpenCursor: () => {
const [ancestor] = editor.api.block({ highest: true })!;
if (!editor.api.isAt({ end: true }) && !editor.api.isEmpty(ancestor)) {
editor
.getApi(BlockSelectionPlugin)
.blockSelection.set(ancestor.id as string);
}
show(editor.api.toDOMNode(ancestor)!);
},
onOpenSelection: () => {
show(editor.api.toDOMNode(editor.api.blocks().at(-1)![0])!);
},
});
useHotkeys('esc', () => {
api.aiChat.stop();
// remove when you implement the route /api/ai/command
(chat as any)._abortFakeStream();
});
const isLoading = status === 'streaming' || status === 'submitted';
if (isLoading && mode === 'insert') return null;
if (toolName === 'comment') return null;
return (
<Popover open={open} onOpenChange={setOpen} modal={false}>
<PopoverAnchor virtualRef={{ current: anchorElement! }} />
<PopoverContent
className="border-none bg-transparent p-0 shadow-none"
style={{
width: anchorElement?.offsetWidth,
}}
onEscapeKeyDown={(e) => {
e.preventDefault();
api.aiChat.hide();
}}
align="center"
side="bottom"
>
<Command
className="w-full rounded-lg border shadow-md"
value={value}
onValueChange={setValue}
>
{mode === 'chat' && isSelecting && content && (
<AIChatEditor content={content} />
)}
{isLoading ? (
<div className="flex grow items-center gap-2 p-2 text-sm text-muted-foreground select-none">
<Loader2Icon className="size-4 animate-spin" />
{messages.length > 1 ? 'Editing...' : 'Thinking...'}
</div>
) : (
<CommandPrimitive.Input
className={cn(
'flex h-9 w-full min-w-0 border-input bg-transparent px-3 py-1 text-base transition-[color,box-shadow] outline-none placeholder:text-muted-foreground md:text-sm dark:bg-input/30',
'aria-invalid:border-destructive aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40',
'border-b focus-visible:ring-transparent'
)}
value={input}
onKeyDown={(e) => {
if (isHotkey('backspace')(e) && input.length === 0) {
e.preventDefault();
api.aiChat.hide();
}
if (isHotkey('enter')(e) && !e.shiftKey && !value) {
e.preventDefault();
void api.aiChat.submit(input);
setInput('');
}
}}
onValueChange={setInput}
placeholder="Ask AI anything..."
data-plate-focus
autoFocus
/>
)}
{!isLoading && (
<CommandList>
<AIMenuItems
input={input}
setInput={setInput}
setValue={setValue}
/>
</CommandList>
)}
</Command>
</PopoverContent>
</Popover>
);
}
type EditorChatState =
| 'cursorCommand'
| 'cursorSuggestion'
| 'selectionCommand'
| 'selectionSuggestion';
const aiChatItems = {
accept: {
icon: <Check />,
label: 'Accept',
value: 'accept',
onSelect: ({ editor }) => {
editor.getTransforms(AIChatPlugin).aiChat.accept();
editor.tf.focus({ edge: 'end' });
},
},
continueWrite: {
icon: <PenLine />,
label: 'Continue writing',
value: 'continueWrite',
onSelect: ({ editor, input }) => {
const ancestorNode = editor.api.block({ highest: true });
if (!ancestorNode) return;
const isEmpty = NodeApi.string(ancestorNode[0]).trim().length === 0;
void editor.getApi(AIChatPlugin).aiChat.submit(input, {
mode: 'insert',
prompt: isEmpty
? `<Document>
{editor}
</Document>
Start writing a new paragraph AFTER <Document> ONLY ONE SENTENCE`
: 'Continue writing AFTER <Block> ONLY ONE SENTENCE. DONT REPEAT THE TEXT.',
toolName: 'generate',
});
},
},
discard: {
icon: <X />,
label: 'Discard',
shortcut: 'Escape',
value: 'discard',
onSelect: ({ editor, input }) => {
editor.getTransforms(AIPlugin).ai.undo();
editor.getApi(AIChatPlugin).aiChat.hide();
},
},
emojify: {
icon: <SmileIcon />,
label: 'Emojify',
value: 'emojify',
onSelect: ({ editor, input }) => {
void editor.getApi(AIChatPlugin).aiChat.submit(input, {
prompt: 'Emojify',
toolName: 'edit',
});
},
},
explain: {
icon: <BadgeHelp />,
label: 'Explain',
value: 'explain',
onSelect: ({ editor, input }) => {
void editor.getApi(AIChatPlugin).aiChat.submit(input, {
prompt: {
default: 'Explain {editor}',
selecting: 'Explain',
},
toolName: 'generate',
});
},
},
fixSpelling: {
icon: <Check />,
label: 'Fix spelling & grammar',
value: 'fixSpelling',
onSelect: ({ editor, input }) => {
void editor.getApi(AIChatPlugin).aiChat.submit(input, {
prompt: 'Fix spelling and grammar',
toolName: 'edit',
});
},
},
generateMarkdownSample: {
icon: <BookOpenCheck />,
label: 'Generate Markdown sample',
value: 'generateMarkdownSample',
onSelect: ({ editor, input }) => {
void editor.getApi(AIChatPlugin).aiChat.submit(input, {
prompt: 'Generate a markdown sample',
toolName: 'generate',
});
},
},
generateMdxSample: {
icon: <BookOpenCheck />,
label: 'Generate MDX sample',
value: 'generateMdxSample',
onSelect: ({ editor, input }) => {
void editor.getApi(AIChatPlugin).aiChat.submit(input, {
prompt: 'Generate a mdx sample',
toolName: 'generate',
});
},
},
improveWriting: {
icon: <Wand />,
label: 'Improve writing',
value: 'improveWriting',
onSelect: ({ editor, input }) => {
void editor.getApi(AIChatPlugin).aiChat.submit(input, {
prompt: 'Improve the writing',
toolName: 'edit',
});
},
},
insertBelow: {
icon: <ListEnd />,
label: 'Insert below',
value: 'insertBelow',
onSelect: ({ aiEditor, editor }) => {
/** Format: 'none' Fix insert table */
void editor
.getTransforms(AIChatPlugin)
.aiChat.insertBelow(aiEditor, { format: 'none' });
},
},
makeLonger: {
icon: <ListPlus />,
label: 'Make longer',
value: 'makeLonger',
onSelect: ({ editor, input }) => {
void editor.getApi(AIChatPlugin).aiChat.submit(input, {
prompt: 'Make longer',
toolName: 'edit',
});
},
},
makeShorter: {
icon: <ListMinus />,
label: 'Make shorter',
value: 'makeShorter',
onSelect: ({ editor, input }) => {
void editor.getApi(AIChatPlugin).aiChat.submit(input, {
prompt: 'Make shorter',
toolName: 'edit',
});
},
},
replace: {
icon: <Check />,
label: 'Replace selection',
value: 'replace',
onSelect: ({ aiEditor, editor }) => {
void editor.getTransforms(AIChatPlugin).aiChat.replaceSelection(aiEditor);
},
},
simplifyLanguage: {
icon: <FeatherIcon />,
label: 'Simplify language',
value: 'simplifyLanguage',
onSelect: ({ editor, input }) => {
void editor.getApi(AIChatPlugin).aiChat.submit(input, {
prompt: 'Simplify the language',
toolName: 'edit',
});
},
},
summarize: {
icon: <Album />,
label: 'Add a summary',
value: 'summarize',
onSelect: ({ editor, input }) => {
void editor.getApi(AIChatPlugin).aiChat.submit(input, {
mode: 'insert',
prompt: {
default: 'Summarize {editor}',
selecting: 'Summarize',
},
toolName: 'generate',
});
},
},
tryAgain: {
icon: <CornerUpLeft />,
label: 'Try again',
value: 'tryAgain',
onSelect: ({ editor, input }) => {
void editor.getApi(AIChatPlugin).aiChat.reload();
},
},
} satisfies Record<
string,
{
icon: React.ReactNode;
label: string;
value: string;
component?: React.ComponentType<{ menuState: EditorChatState }>;
filterItems?: boolean;
items?: { label: string; value: string }[];
shortcut?: string;
onSelect?: ({
aiEditor,
editor,
input,
}: {
aiEditor: SlateEditor;
editor: PlateEditor;
input: string;
}) => void;
}
>;
const menuStateItems: Record<
EditorChatState,
{
items: (typeof aiChatItems)[keyof typeof aiChatItems][];
heading?: string;
}[]
> = {
cursorCommand: [
{
items: [
aiChatItems.generateMdxSample,
aiChatItems.generateMarkdownSample,
aiChatItems.continueWrite,
aiChatItems.summarize,
aiChatItems.explain,
],
},
],
cursorSuggestion: [
{
items: [aiChatItems.accept, aiChatItems.discard, aiChatItems.tryAgain],
},
],
selectionCommand: [
{
items: [
aiChatItems.improveWriting,
aiChatItems.emojify,
aiChatItems.makeLonger,
aiChatItems.makeShorter,
aiChatItems.fixSpelling,
aiChatItems.simplifyLanguage,
],
},
],
selectionSuggestion: [
{
items: [
aiChatItems.replace,
aiChatItems.insertBelow,
aiChatItems.discard,
aiChatItems.tryAgain,
],
},
],
};
export const AIMenuItems = ({
input,
setInput,
setValue,
}: {
input: string;
setInput: (value: string) => void;
setValue: (value: string) => void;
}) => {
const editor = useEditorRef();
const { messages } = usePluginOption(AIChatPlugin, 'chat');
const aiEditor = usePluginOption(AIChatPlugin, 'aiEditor')!;
const isSelecting = useIsSelecting();
const menuState = React.useMemo(() => {
if (messages && messages.length > 0) {
return isSelecting ? 'selectionSuggestion' : 'cursorSuggestion';
}
return isSelecting ? 'selectionCommand' : 'cursorCommand';
}, [isSelecting, messages]);
const menuGroups = React.useMemo(() => {
const items = menuStateItems[menuState];
return items;
}, [menuState]);
React.useEffect(() => {
if (menuGroups.length > 0 && menuGroups[0].items.length > 0) {
setValue(menuGroups[0].items[0].value);
}
}, [menuGroups, setValue]);
return (
<>
{menuGroups.map((group, index) => (
<CommandGroup key={index} heading={group.heading}>
{group.items.map((menuItem) => (
<CommandItem
key={menuItem.value}
className="[&_svg]:text-muted-foreground"
value={menuItem.value}
onSelect={() => {
menuItem.onSelect?.({
aiEditor,
editor: editor,
input,
});
setInput('');
}}
>
{menuItem.icon}
<span>{menuItem.label}</span>
</CommandItem>
))}
</CommandGroup>
))}
</>
);
};
export function AILoadingBar() {
const editor = useEditorRef();
const { setOptions } = useEditorPlugin(AIChatPlugin);
const toolName = usePluginOption(AIChatPlugin, 'toolName');
const chat = usePluginOption(AIChatPlugin, 'chat');
const mode = usePluginOption(AIChatPlugin, 'mode');
const { setMessages, status } = chat;
const { api } = useEditorPlugin(AIChatPlugin);
const isLoading = status === 'streaming' || status === 'submitted';
const handleReject = () => {
api.aiChat.hide();
editor.getTransforms(commentPlugin).comment.unsetMark({ transient: true });
setMessages?.([]);
setOptions({
mode: 'insert',
toolName: 'generate',
});
};
const handleAccept = () => {
api.aiChat.hide();
setMessages?.([]);
editor.tf.unsetNodes([getTransientCommentKey()], {
at: [],
match: (n) => TextApi.isText(n) && !!n[KEYS.comment],
});
setOptions({
mode: 'insert',
toolName: 'generate',
});
};
useHotkeys('esc', () => {
api.aiChat.stop();
// remove when you implement the route /api/ai/command
(chat as any)._abortFakeStream();
});
if (isLoading && (mode === 'insert' || toolName === 'comment')) {
return (
<div
className={cn(
'absolute bottom-4 left-1/2 z-20 flex -translate-x-1/2 items-center gap-3 rounded-md border border-border bg-muted px-3 py-1.5 text-sm text-muted-foreground shadow-md transition-all duration-300'
)}
>
<span className="h-4 w-4 animate-spin rounded-full border-2 border-muted-foreground border-t-transparent" />
<span>{status === 'submitted' ? 'Thinking...' : 'Writing...'}</span>
<Button
size="sm"
variant="ghost"
className="flex items-center gap-1 text-xs"
onClick={() => api.aiChat.stop()}
>
<PauseIcon className="h-4 w-4" />
Stop
<kbd className="ml-1 rounded bg-border px-1 font-mono text-[10px] text-muted-foreground shadow-sm">
Esc
</kbd>
</Button>
</div>
);
}
if (toolName === 'comment' && status === 'ready') {
return (
<div
className={cn(
'absolute bottom-4 left-1/2 z-50 flex -translate-x-1/2 flex-col items-center gap-0 rounded-xl border border-border/50 bg-popover p-1 text-sm text-muted-foreground shadow-xl backdrop-blur-sm',
'p-3'
)}
>
{/* Header with controls */}
<div className="flex w-full items-center justify-between gap-3">
<div className="flex items-center gap-5">
<Button size="sm" disabled={isLoading} onClick={handleAccept}>
Accept
</Button>
<Button size="sm" disabled={isLoading} onClick={handleReject}>
Reject
</Button>
</div>
</div>
</div>
);
}
return null;
}
You can extend the AI menu with custom commands by adding new items to the aiChatItems object and updating the menu state items.
Simple Custom Command
Add a basic command that submits a custom prompt:
// Add to your ai-menu.tsx aiChatItems object
summarizeInBullets: {
icon: <ListIcon />,
label: 'Summarize in bullets',
value: 'summarizeInBullets',
onSelect: ({ editor }) => {
void editor.getApi(AIChatPlugin).aiChat.submit({
prompt: 'Summarize this content as bullet points',
});
},
},
Command with Complex Logic
Create commands with client-side logic before submission:
generateTOC: {
icon: <BookIcon />,
label: 'Generate table of contents',
value: 'generateTOC',
onSelect: ({ editor }) => {
// Check if document has headings
// editor.api.nodes returns an iterable, so materialize it before checking length
const headings = Array.from(
  editor.api.nodes({
    match: (n) => ['h1', 'h2', 'h3'].includes(n.type as string),
  })
);
if (headings.length === 0) {
void editor.getApi(AIChatPlugin).aiChat.submit({
mode: 'insert',
prompt: 'Create a table of contents with sample headings for this document',
});
} else {
void editor.getApi(AIChatPlugin).aiChat.submit({
mode: 'insert',
prompt: 'Generate a table of contents based on the existing headings',
});
}
},
},
Understanding Menu States
The AI menu adapts to different contexts based on user selection and AI response state:
const menuState = React.useMemo(() => {
// If AI has already responded, show suggestion actions
if (messages && messages.length > 0) {
return isSelecting ? 'selectionSuggestion' : 'cursorSuggestion';
}
// If no AI response yet, show command actions
return isSelecting ? 'selectionCommand' : 'cursorCommand';
}, [isSelecting, messages]);
Menu States:
- cursorCommand: No selection, no AI response → Show generation commands (Continue writing, Summarize, etc.)
- selectionCommand: Text selected, no AI response → Show editing commands (Improve writing, Fix spelling, etc.)
- cursorSuggestion: No selection, AI responded → Show suggestion actions (Accept, Discard, Try again)
- selectionSuggestion: Text selected, AI responded → Show replacement actions (Replace selection, Insert below, etc.)
Update Menu States
Add your custom commands to the appropriate menu states in menuStateItems:
const menuStateItems: Record<EditorChatState, { items: any[] }[]> = {
cursorCommand: [
{
items: [
aiChatItems.generateTOC,
aiChatItems.summarizeInBullets,
// ... existing items
],
},
],
selectionCommand: [
{
items: [
aiChatItems.summarizeInBullets, // Works for selected text too
// ... existing items
],
},
],
// ... other states
};
Switching AI Models
Configure different AI models and providers in your API route:
import { createOpenAI } from '@ai-sdk/openai';
import { createAnthropic } from '@ai-sdk/anthropic';
export async function POST(req: NextRequest) {
const { model = 'gpt-4o', provider = 'openai', ...rest } = await req.json();
let aiProvider;
switch (provider) {
case 'anthropic':
aiProvider = createAnthropic({ apiKey: process.env.ANTHROPIC_API_KEY });
break;
case 'openai':
default:
aiProvider = createOpenAI({ apiKey: process.env.OPENAI_API_KEY });
break;
}
const result = streamText({
model: aiProvider(model),
// ... other options
});
return result.toDataStreamResponse();
}
Configure the model in your aiChatPlugin:
export const aiChatPlugin = AIChatPlugin.extend({
options: {
chatOptions: {
api: '/api/ai/command',
body: {
model: 'gpt-4o-mini', // or 'claude-4-sonnet'
provider: 'openai', // or 'anthropic'
},
},
// ... other options
},
});
For more AI providers and models, see the Vercel AI SDK documentation.
Custom Streaming Optimization
Optimize streaming performance for specific content types with custom chunking strategies:
const customChunking = (buffer: string) => {
// Detect JSON content for slower chunking
if (buffer.includes('{') && buffer.includes('}')) {
const jsonMatch = /\{[^}]*\}/g.exec(buffer);
if (jsonMatch) {
return buffer.slice(0, jsonMatch.index + jsonMatch[0].length);
}
}
// Detect code blocks for line-based chunking
if (buffer.includes('```')) {
const lineMatch = /\n+/m.exec(buffer);
return lineMatch
? buffer.slice(0, lineMatch.index + lineMatch[0].length)
: null;
}
// Default word chunking
const wordMatch = /\S+\s+/m.exec(buffer);
return wordMatch
? buffer.slice(0, wordMatch.index + wordMatch[0].length)
: null;
};
// Use in your streamText configuration
const result = streamText({
experimental_transform: smoothStream({
chunking: customChunking,
delayInMs: (buffer) => {
// Slower for complex content, faster for simple text
return buffer.includes('```') || buffer.includes('{') ? 80 : 20;
},
}),
// ... other options
});
Security Considerations
Implement security best practices for AI functionality:
export async function POST(req: NextRequest) {
const { messages, system } = await req.json();
// Validate request structure
if (!messages || !Array.isArray(messages)) {
return NextResponse.json({ error: 'Invalid messages' }, { status: 400 });
}
// Content length validation
const totalContent = messages.map((m) => m.content).join('');
if (totalContent.length > 50000) {
return NextResponse.json({ error: 'Content too long' }, { status: 413 });
}
// Rate limiting (implement with your preferred solution)
// await rateLimit(req);
// Content filtering (optional)
// const filteredMessages = await filterContent(messages);
// Process AI request...
}
Security Guidelines:
- Validate Input: Always validate and sanitize user prompts
- Rate Limiting: Implement rate limiting on AI endpoints (see the sketch after this list)
- Content Filtering: Consider content filtering for responses
- API Key Security: Never expose API keys client-side
- User Privacy: Be mindful of data sent to AI models
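As one way to approach the rate-limiting guideline, here is a minimal sketch using a fixed-window, in-memory counter keyed by client IP. The helper and limits are illustrative assumptions; a real deployment would typically use Redis or a dedicated rate-limiting service instead of per-process memory.
import type { NextRequest } from 'next/server';
import { NextResponse } from 'next/server';
// Hypothetical fixed-window limiter: at most MAX_REQUESTS per WINDOW_MS per IP.
const WINDOW_MS = 60_000;
const MAX_REQUESTS = 20;
const hits = new Map<string, { count: number; windowStart: number }>();
function isRateLimited(ip: string): boolean {
  const now = Date.now();
  const entry = hits.get(ip);
  if (!entry || now - entry.windowStart > WINDOW_MS) {
    hits.set(ip, { count: 1, windowStart: now });
    return false;
  }
  entry.count += 1;
  return entry.count > MAX_REQUESTS;
}
export async function POST(req: NextRequest) {
  const ip = req.headers.get('x-forwarded-for') ?? 'unknown';
  if (isRateLimited(ip)) {
    return NextResponse.json({ error: 'Too many requests' }, { status: 429 });
  }
  // ...validate the payload and process the AI request as shown above
  return NextResponse.json({ ok: true });
}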
Plugins
AIPlugin
Core plugin that extends the editor with AI content management capabilities.
AIChatPlugin
Main plugin that enables AI chat operations, streaming, and user interface interactions.
Options:
- chatOptions: Configuration options for the Vercel AI SDK useChat hook.
  - api: API endpoint for AI requests
  - body: Additional request body parameters
- mode: Specifies how assistant messages are handled.
  - 'chat': Shows a preview with accept/reject options
  - 'insert': Directly inserts content into the editor
  - Default: 'chat'
- open: Whether the AI chat interface is open. Default: false
- streaming: Whether an AI response is currently streaming. Default: false
- promptTemplate: Template for generating user prompts. Supports placeholders:
  - {block}: Markdown of the blocks in the selection
  - {editor}: Markdown of the entire editor content
  - {selection}: Markdown of the current selection
  - {prompt}: Actual user prompt
  - Default: '{prompt}'
- systemTemplate: Template for system messages. Supports the same placeholders as promptTemplate.
- Comment prompt template: Template for generating AI comment prompts. Supports the same placeholders as promptTemplate.
- aiEditor: The editor instance used to generate AI responses. Default: null
- chat: Chat helpers returned by the useChat hook.
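These options can be read reactively with usePluginOption, as the AIMenu and AILoadingBar components above do. A small illustrative component (the component name is hypothetical):
'use client';
import { AIChatPlugin } from '@platejs/ai/react';
import { usePluginOption } from 'platejs/react';
export function AIStatusLabel() {
  // Reactive reads of AIChatPlugin options (same pattern as AIMenu/AILoadingBar).
  const mode = usePluginOption(AIChatPlugin, 'mode');
  const open = usePluginOption(AIChatPlugin, 'open');
  const streaming = usePluginOption(AIChatPlugin, 'streaming');
  return (
    <span>
      {open ? `AI menu open (${mode})` : 'AI menu closed'}
      {streaming ? ' (streaming...)' : null}
    </span>
  );
}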
API
api.aiChat.accept()
Accepts the current AI suggestion:
- Removes AI marks from the content
- Hides the AI chat interface
- Focuses the editor
api.aiChat.insertBelow()
Inserts AI-generated content below the current block.
Handles both block selection and normal selection modes:
- In block selection: Inserts after the last selected block, applying formatting from the last block
- In normal selection: Inserts after the current block, applying formatting from the current block
api.aiChat.replaceSelection()
Replaces the current selection with AI-generated content.
Handles different selection modes:
- Single block selection: Replaces the selected block, applying its formatting to inserted content based on format option
- Multiple block selection: Replaces all selected blocks
  - With format: 'none' or 'single': Preserves original formatting
  - With format: 'all': Applies the first block's formatting to all content
- Normal selection: Replaces the current selection while maintaining surrounding context
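Both transforms are typically invoked from menu items with the AI response editor (see aiChatItems.insertBelow and aiChatItems.replace above). A condensed sketch; the helper name is illustrative:
import { AIChatPlugin } from '@platejs/ai/react';
import type { SlateEditor } from 'platejs';
import type { PlateEditor } from 'platejs/react';
// aiEditor holds the AI response (usePluginOption(AIChatPlugin, 'aiEditor')).
function applyAIResponse(editor: PlateEditor, aiEditor: SlateEditor, replace: boolean) {
  if (replace) {
    // Replace the current selection (or selected blocks) with the AI response.
    editor.getTransforms(AIChatPlugin).aiChat.replaceSelection(aiEditor);
  } else {
    // Insert below; format: 'none' keeps the response's own block formatting.
    editor.getTransforms(AIChatPlugin).aiChat.insertBelow(aiEditor, { format: 'none' });
  }
}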
api.aiChat.reset()
Resets the chat state:
- Stops any ongoing generation
- Clears chat messages
- Removes all AI nodes from the editor
api.aiChat.node()
Gets the AI chat node entry.
api.aiChat.reload()
Reloads the current AI chat:
- In insert mode: Undoes previous AI changes
- Reloads the chat with the current system prompt
api.aiChat.show()
Shows the AI chat interface:
- Resets the chat state
- Clears messages
- Sets the open state to true
api.aiChat.hide()
Hides the AI chat interface:
- Resets the chat state
- Sets the open state to false
- Focuses the editor
- Removes the AI anchor
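The AIMenu component above toggles these through a small helper; the same pattern works anywhere you have access to the plugin API (the hook name here is illustrative):
import { AIChatPlugin } from '@platejs/ai/react';
import { useEditorPlugin } from 'platejs/react';
// Mirrors the setOpen helper in AIMenu above.
export function useToggleAIMenu() {
  const { api } = useEditorPlugin(AIChatPlugin);
  return (open: boolean) => (open ? api.aiChat.show() : api.aiChat.hide());
}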
api.aiChat.stop()
Stops the current AI generation:
- Sets streaming state to false
- Calls the chat stop function
api.aiChat.submit()
Submits a prompt to generate AI content.
Options:
- mode: Mode to use. In insert mode, undoes previous AI changes before submitting. Default: 'chat' for selection, 'insert' otherwise.
- prompt: Custom prompt to submit. Default: uses the chat input if not provided.
- system: Custom system message for this request.
- toolName: Tool name for tracking AI tools used in submissions.
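For example, the summarize menu item above submits with these options:
// From aiChatItems.summarize: editor and input come from the menu item context.
void editor.getApi(AIChatPlugin).aiChat.submit(input, {
  mode: 'insert',
  prompt: { default: 'Summarize {editor}', selecting: 'Summarize' },
  toolName: 'generate',
});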
api.aiChat.submitComment()
Submits selected text for AI comment generation.
Utilities
aiCommentToRange
Converts AI comments to text ranges with proper block mapping for accurate comment positioning.
findTextRangeInBlock
Finds text ranges within blocks for accurate comment positioning. Uses a Longest Common Subsequence (LCS) algorithm for fuzzy text matching.
submitAIComment
Submits text for AI comment generation.
Transforms
tf.aiChat.removeAnchor()
Removes the AI chat anchor node from the editor.
tf.aiChat.accept()
Accepts the current AI suggestion and integrates it into the editor content.
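The Accept menu item above calls it and then refocuses the editor:
// From aiChatItems.accept above.
editor.getTransforms(AIChatPlugin).aiChat.accept();
editor.tf.focus({ edge: 'end' });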
tf.aiChat.insertBelow()
Transform that inserts AI content below the current block.
tf.aiChat.replaceSelection()
Transform that replaces the current selection with AI content.
tf.ai.insertNodes()
Inserts AI-generated nodes with the AI mark.
tf.ai.removeMarks()
Removes AI marks from nodes in the specified location.
tf.ai.removeNodes()
Removes nodes that have the AI mark.
tf.ai.undo()
Special undo operation for AI changes:
- Undoes the last operation if it was AI-generated
- Removes the redo stack entry to prevent redoing AI operations
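The Discard menu item above pairs it with hiding the AI menu:
// From aiChatItems.discard above.
editor.getTransforms(AIPlugin).ai.undo();
editor.getApi(AIChatPlugin).aiChat.hide();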
Streaming Behavior
Enhanced Empty Paragraph Handling
The AI streaming system intelligently handles empty paragraphs:
- Only removes truly empty paragraphs when starting to stream
- Preserves paragraphs containing only whitespace or formatting marks
- Prevents accidental content loss during streaming initialization
Table and Column Support
AI streaming seamlessly works within complex structures:
Tables:
- Streams directly into table cells without disrupting table structure
- Maintains table formatting during streaming
- Properly handles cell boundaries
Columns:
- Supports streaming into column layouts
- Preserves column width and structure
- Enables AI content generation within multi-column documents
MDX Tag Preservation
During streaming, the system:
- Detects and preserves custom MDX tags
- Prevents MDX content from being incorrectly parsed as Markdown
- Maintains proper nesting of MDX elements
- Supports streaming of content containing MDX components
Hooks
useAIChatEditor
A hook that registers an editor in the AI chat plugin and deserializes markdown content with block-level memoization.
const AIChatEditor = ({ content }: { content: string }) => {
const aiEditor = usePlateEditor({
plugins: [
// Your editor plugins
MarkdownPlugin,
AIPlugin,
AIChatPlugin,
// etc...
],
});
useAIChatEditor(aiEditor, content, {
// Optional markdown parser options
parser: {
exclude: ['space'],
},
});
return <Editor editor={aiEditor} />;
};