From 60c4358be4ffda52d13f6337d2f7c041fe5c8614 Mon Sep 17 00:00:00 2001 From: Jared Palmer Date: Thu, 25 May 2023 09:45:18 -0400 Subject: [PATCH 1/8] more docs --- README.md | 18 +++++++++--------- apps/docs/pages/docs/api.mdx | 8 ++++---- apps/docs/pages/docs/getting-started.mdx | 10 +++++----- apps/docs/pages/docs/index.mdx | 6 +++--- 4 files changed, 21 insertions(+), 21 deletions(-) diff --git a/README.md b/README.md index a5db393..41855fd 100644 --- a/README.md +++ b/README.md @@ -47,7 +47,7 @@ The goal of this library lies in its commitment to work directly with each AI/Mo ```tsx // app/api/generate/route.ts import { Configuration, OpenAIApi } from 'openai-edge'; -import { OpenAITextStream, StreamingTextResponse } from '@vercel/ai-utils'; +import { OpenAIStream, StreamingTextResponse } from '@vercel/ai-utils'; const config = new Configuration({ apiKey: process.env.OPENAI_API_KEY, @@ -62,7 +62,7 @@ export async function POST() { stream: true, messages: [{ role: 'user', content: 'What is love?' }], }); - const stream = OpenAITextStream(response); + const stream = OpenAIStream(response); return new StreamingTextResponse(stream); } ``` @@ -100,7 +100,7 @@ Create a Next.js Route Handler that uses the Edge Runtime that we'll use to gene ```tsx // ./app/api/generate/route.ts import { Configuration, OpenAIApi } from 'openai-edge'; -import { OpenAITextStream, StreamingTextResponse } from '@vercel/ai-utils'; +import { OpenAIStream, StreamingTextResponse } from '@vercel/ai-utils'; // Create an OpenAI API client (that's edge friendly!) const config = new Configuration({ @@ -122,13 +122,13 @@ export async function POST(req: Request) { prompt, }); // Convert the response into a friendly text-stream - const stream = OpenAITextStream(response); + const stream = OpenAIStream(response); // Respond with the stream return new StreamingTextResponse(stream); } ``` -Vercel AI Utils provides 2 utility helpers to make the above seamless: First, we pass the streaming `response` we receive from OpenAI to `OpenAITextStream`. This method decodes/extracts the text tokens in the response and then re-encodes them properly for simple consumption. We can then pass that new stream directly to `StreamingTextResponse`. This is another utility class that extends the normal Node/Edge Runtime `Response` class with the default headers you probably want (hint: `'Content-Type': 'text/plain; charset=utf-8'` is already set for you). +Vercel AI Utils provides 2 utility helpers to make the above seamless: First, we pass the streaming `response` we receive from OpenAI to `OpenAIStream`. This method decodes/extracts the text tokens in the response and then re-encodes them properly for simple consumption. We can then pass that new stream directly to `StreamingTextResponse`. This is another utility class that extends the normal Node/Edge Runtime `Response` class with the default headers you probably want (hint: `'Content-Type': 'text/plain; charset=utf-8'` is already set for you). ### Wire up the UI @@ -171,7 +171,7 @@ A transform that will extract the text from all chat and completion OpenAI model ```tsx // app/api/generate/route.ts import { Configuration, OpenAIApi } from 'openai-edge'; -import { OpenAITextStream, StreamingTextResponse } from '@vercel/ai-utils'; +import { OpenAIStream, StreamingTextResponse } from '@vercel/ai-utils'; const config = new Configuration({ apiKey: process.env.OPENAI_API_KEY, @@ -186,7 +186,7 @@ export async function POST() { stream: true, messages: [{ role: 'user', content: 'What is love?' 
}], }); - const stream = OpenAITextStream(response, { + const stream = OpenAIStream(response, { async onStart() { console.log('streamin yo') }, @@ -239,7 +239,7 @@ This is a tiny wrapper around `Response` class that makes returning `ReadableStr ```tsx // app/api/generate/route.ts -import { OpenAITextStream, StreamingTextResponse } from '@vercel/ai-utils'; +import { OpenAIStream, StreamingTextResponse } from '@vercel/ai-utils'; export const runtime = 'edge'; @@ -249,7 +249,7 @@ export async function POST() { stream: true, messages: { role: 'user', content: 'What is love?' }, }); - const stream = OpenAITextStream(response); + const stream = OpenAIStream(response); return new StreamingTextResponse(stream, { 'X-RATE-LIMIT': 'lol', }); // => new Response(stream, { status: 200, headers: { 'Content-Type': 'text/plain; charset=utf-8', 'X-RATE-LIMIT': 'lol' }}) diff --git a/apps/docs/pages/docs/api.mdx b/apps/docs/pages/docs/api.mdx index 46151f4..bc766ef 100644 --- a/apps/docs/pages/docs/api.mdx +++ b/apps/docs/pages/docs/api.mdx @@ -11,7 +11,7 @@ A transform that will extract the text from all chat and completion OpenAI model ```tsx // app/api/generate/route.ts import { Configuration, OpenAIApi } from 'openai-edge'; -import { OpenAITextStream, StreamingTextResponse } from '@vercel/ai-utils'; +import { OpenAIStream, StreamingTextResponse } from '@vercel/ai-utils'; const config = new Configuration({ apiKey: process.env.OPENAI_API_KEY, @@ -26,7 +26,7 @@ export async function POST() { stream: true, messages: [{ role: 'user', content: 'What is love?' }], }); - const stream = OpenAITextStream(response, { + const stream = OpenAIStream(response, { async onStart() { console.log('streamin yo') }, @@ -79,7 +79,7 @@ This is a tiny wrapper around `Response` class that makes returning `ReadableStr ```tsx // app/api/generate/route.ts -import { OpenAITextStream, StreamingTextResponse } from '@vercel/ai-utils'; +import { OpenAIStream, StreamingTextResponse } from '@vercel/ai-utils'; export const runtime = 'edge'; @@ -89,7 +89,7 @@ export async function POST() { stream: true, messages: { role: 'user', content: 'What is love?' }, }); - const stream = OpenAITextStream(response); + const stream = OpenAIStream(response); return new StreamingTextResponse(stream, { 'X-RATE-LIMIT': 'lol', }); // => new Response(stream, { status: 200, headers: { 'Content-Type': 'text/plain; charset=utf-8', 'X-RATE-LIMIT': 'lol' }}) diff --git a/apps/docs/pages/docs/getting-started.mdx b/apps/docs/pages/docs/getting-started.mdx index 1cf0f47..391fb80 100644 --- a/apps/docs/pages/docs/getting-started.mdx +++ b/apps/docs/pages/docs/getting-started.mdx @@ -36,7 +36,7 @@ yarn add @vercel/ai-utils ```tsx {3,18-19} // app/api/generate/route.ts import { Configuration, OpenAIApi } from 'openai-edge'; -import { OpenAITextStream, StreamingTextResponse } from '@vercel/ai-utils'; +import { OpenAIStream, StreamingTextResponse } from '@vercel/ai-utils'; const config = new Configuration({ apiKey: process.env.OPENAI_API_KEY, @@ -51,7 +51,7 @@ export async function POST() { stream: true, messages: [{ role: 'user', content: 'What is love?' 
}], }); - const stream = OpenAITextStream(response); + const stream = OpenAIStream(response); return new StreamingTextResponse(stream); } ``` @@ -89,7 +89,7 @@ Create a Next.js Route Handler that uses the Edge Runtime that we'll use to gene ```tsx // ./app/api/generate/route.ts import { Configuration, OpenAIApi } from 'openai-edge'; -import { OpenAITextStream, StreamingTextResponse } from '@vercel/ai-utils'; +import { OpenAIStream, StreamingTextResponse } from '@vercel/ai-utils'; // Create an OpenAI API client (that's edge friendly!) const config = new Configuration({ @@ -111,13 +111,13 @@ export async function POST(req: Request) { prompt, }); // Convert the response into a friendly text-stream - const stream = OpenAITextStream(response); + const stream = OpenAIStream(response); // Respond with the stream return new StreamingTextResponse(stream); } ``` -Vercel AI Utils provides 2 utility helpers to make the above seamless: First, we pass the streaming `response` we receive from OpenAI to `OpenAITextStream`. This method decodes/extracts the text tokens in the response and then re-encodes them properly for simple consumption. We can then pass that new stream directly to `StreamingTextResponse`. This is another utility class that extends the normal Node/Edge Runtime `Response` class with the default headers you probably want (hint: `'Content-Type': 'text/plain; charset=utf-8'` is already set for you). +Vercel AI Utils provides 2 utility helpers to make the above seamless: First, we pass the streaming `response` we receive from OpenAI to `OpenAIStream`. This method decodes/extracts the text tokens in the response and then re-encodes them properly for simple consumption. We can then pass that new stream directly to `StreamingTextResponse`. This is another utility class that extends the normal Node/Edge Runtime `Response` class with the default headers you probably want (hint: `'Content-Type': 'text/plain; charset=utf-8'` is already set for you). ### Wire up the UI diff --git a/apps/docs/pages/docs/index.mdx b/apps/docs/pages/docs/index.mdx index 5bf226c..b35cb51 100644 --- a/apps/docs/pages/docs/index.mdx +++ b/apps/docs/pages/docs/index.mdx @@ -48,7 +48,7 @@ Another core tenet of this library lies in its commitment to work directly with ```tsx {3,18-19} // app/api/generate/route.ts import { Configuration, OpenAIApi } from 'openai-edge'; -import { OpenAITextStream, StreamingTextResponse } from '@vercel/ai-utils'; +import { OpenAIStream, StreamingTextResponse } from '@vercel/ai-utils'; const config = new Configuration({ apiKey: process.env.OPENAI_API_KEY, @@ -63,9 +63,9 @@ export async function POST() { stream: true, messages: [{ role: 'user', content: 'What is love?' }], }); - const stream = OpenAITextStream(response); + const stream = OpenAIStream(response); return new StreamingTextResponse(stream); } ``` -Vercel AI Utils provides 2 utility helpers to make the above seamless: First, we pass the streaming `response` we receive from OpenAI to `OpenAITextStream`. This method decodes/extracts the text tokens in the response and then re-encodes them properly for simple consumption. We can then pass that new stream directly to `StreamingTextResponse`. This is another utility class that extends the normal Node/Edge Runtime `Response` class with the default headers you probably want (hint: `'Content-Type': 'text/plain; charset=utf-8'` is already set for you). 
+Vercel AI Utils provides 2 utility helpers to make the above seamless: First, we pass the streaming `response` we receive from OpenAI to `OpenAIStream`. This method decodes/extracts the text tokens in the response and then re-encodes them properly for simple consumption. We can then pass that new stream directly to `StreamingTextResponse`. This is another utility class that extends the normal Node/Edge Runtime `Response` class with the default headers you probably want (hint: `'Content-Type': 'text/plain; charset=utf-8'` is already set for you). From aba98609550f5ba08a62f1e62bd02c202bcbf953 Mon Sep 17 00:00:00 2001 From: Jared Palmer Date: Thu, 25 May 2023 09:47:04 -0400 Subject: [PATCH 2/8] Add example app --- example/.gitignore | 35 ++++++++++ example/README.md | 34 +++++++++ example/app/api/generate/route.ts | 31 +++++++++ example/app/chat.tsx | 70 +++++++++++++++++++ example/app/favicon.ico | Bin 0 -> 25931 bytes example/app/globals.css | 27 ++++++++ example/app/layout.tsx | 21 ++++++ example/app/page.tsx | 9 +++ example/next.config.js | 4 ++ example/package.json | 26 +++++++ example/postcss.config.js | 6 ++ example/public/next.svg | 1 + example/public/vercel.svg | 1 + example/tailwind.config.js | 18 +++++ example/tsconfig.json | 28 ++++++++ pnpm-lock.yaml | 111 ++++++++++++++++++------------ pnpm-workspace.yaml | 1 + 17 files changed, 378 insertions(+), 45 deletions(-) create mode 100644 example/.gitignore create mode 100644 example/README.md create mode 100644 example/app/api/generate/route.ts create mode 100644 example/app/chat.tsx create mode 100644 example/app/favicon.ico create mode 100644 example/app/globals.css create mode 100644 example/app/layout.tsx create mode 100644 example/app/page.tsx create mode 100644 example/next.config.js create mode 100644 example/package.json create mode 100644 example/postcss.config.js create mode 100644 example/public/next.svg create mode 100644 example/public/vercel.svg create mode 100644 example/tailwind.config.js create mode 100644 example/tsconfig.json diff --git a/example/.gitignore b/example/.gitignore new file mode 100644 index 0000000..8f322f0 --- /dev/null +++ b/example/.gitignore @@ -0,0 +1,35 @@ +# See https://help.github.com/articles/ignoring-files/ for more about ignoring files. + +# dependencies +/node_modules +/.pnp +.pnp.js + +# testing +/coverage + +# next.js +/.next/ +/out/ + +# production +/build + +# misc +.DS_Store +*.pem + +# debug +npm-debug.log* +yarn-debug.log* +yarn-error.log* + +# local env files +.env*.local + +# vercel +.vercel + +# typescript +*.tsbuildinfo +next-env.d.ts diff --git a/example/README.md b/example/README.md new file mode 100644 index 0000000..f4da3c4 --- /dev/null +++ b/example/README.md @@ -0,0 +1,34 @@ +This is a [Next.js](https://nextjs.org/) project bootstrapped with [`create-next-app`](https://github.com/vercel/next.js/tree/canary/packages/create-next-app). + +## Getting Started + +First, run the development server: + +```bash +npm run dev +# or +yarn dev +# or +pnpm dev +``` + +Open [http://localhost:3000](http://localhost:3000) with your browser to see the result. + +You can start editing the page by modifying `app/page.tsx`. The page auto-updates as you edit the file. + +This project uses [`next/font`](https://nextjs.org/docs/basic-features/font-optimization) to automatically optimize and load Inter, a custom Google Font. 
+ +## Learn More + +To learn more about Next.js, take a look at the following resources: + +- [Next.js Documentation](https://nextjs.org/docs) - learn about Next.js features and API. +- [Learn Next.js](https://nextjs.org/learn) - an interactive Next.js tutorial. + +You can check out [the Next.js GitHub repository](https://github.com/vercel/next.js/) - your feedback and contributions are welcome! + +## Deploy on Vercel + +The easiest way to deploy your Next.js app is to use the [Vercel Platform](https://vercel.com/new?utm_medium=default-template&filter=next.js&utm_source=create-next-app&utm_campaign=create-next-app-readme) from the creators of Next.js. + +Check out our [Next.js deployment documentation](https://nextjs.org/docs/deployment) for more details. diff --git a/example/app/api/generate/route.ts b/example/app/api/generate/route.ts new file mode 100644 index 0000000..27ab36e --- /dev/null +++ b/example/app/api/generate/route.ts @@ -0,0 +1,31 @@ +// app/api/generate/route.ts +import { Configuration, OpenAIApi } from 'openai-edge'; +import { OpenAIStream, StreamingTextResponse } from '@vercel/ai-utils'; + +const config = new Configuration({ + apiKey: process.env.OPENAI_API_KEY, +}); +const openai = new OpenAIApi(config); + +export const runtime = 'edge'; + +export async function POST() { + const response = await openai.createChatCompletion({ + model: 'gpt-4', + stream: true, + messages: [{ role: 'user', content: 'What is love?' }], + }); + const stream = OpenAIStream(response, { + async onStart() { + console.log('streamin yo'); + }, + async onToken(token) { + console.log('token: ' + token); + }, + async onCompletion(content) { + console.log('full text: ' + content); + // await prisma.messages.create({ content }) or something + }, + }); + return new StreamingTextResponse(stream); +} diff --git a/example/app/chat.tsx b/example/app/chat.tsx new file mode 100644 index 0000000..3750e62 --- /dev/null +++ b/example/app/chat.tsx @@ -0,0 +1,70 @@ +'use client'; + +import { useChat } from '@vercel/ai-utils'; + +export function Chat() { + const { messages, append } = useChat({ + initialMessages: [], + api: '/api/generate', + parser: async (res, { onCompletion, onToken, onStart }) => { + // This data is a ReadableStream + const data = res.body; + if (!data) { + return new ReadableStream(); + } + if (onStart) { + onStart(); + } + const reader = data.getReader(); + const decoder = new TextDecoder(); + let done = false; + let accumulatedValue = ''; // Variable to accumulate chunks + + while (!done) { + const { value, done: doneReading } = await reader.read(); + done = doneReading; + const chunkValue = decoder.decode(value); + accumulatedValue += chunkValue; // Accumulate the chunk value + + // Check if the accumulated value contains the delimiter + const delimiter = '\n'; + const chunks = accumulatedValue.split(delimiter); + + // Process all chunks except the last one (which may be incomplete) + while (chunks.length > 1) { + const chunkToDispatch = chunks.shift(); // Get the first chunk + if (chunkToDispatch && chunkToDispatch.length > 0) { + const chunk = JSON.parse(chunkToDispatch); + if (onToken) { + onToken(chunk); + } + } + } + + // The last chunk may be incomplete, so keep it in the accumulated value + accumulatedValue = chunks[0]; + } + + // Process any remaining accumulated value after the loop is done + if (accumulatedValue.length > 0) { + if (onCompletion) { + onCompletion(accumulatedValue); + } + } + + return res.body as ReadableStream; + }, + }); + + return ( +
+    <div>
+      {messages && messages.length
+        ? messages.map((m, i) => <div key={i}>{m.content}</div>)
+        : null}
+      <button onClick={() => append({ role: 'user', content: 'What is love?' })}>
+        Ask
+      </button>
+    </div>
+  );
+}