Skip to content

Commit 7e0d6ce

Browse files
authored
Merge pull request #94 from token-js/pate/open-router
feat: Support OpenRouter
2 parents 2fea7e7 + 50a0584 commit 7e0d6ce

File tree

13 files changed

+202
-18
lines changed

13 files changed

+202
-18
lines changed

.changeset/eight-goats-jam.md

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,5 @@
1+
---
2+
'token.js': patch
3+
---
4+
5+
Support OpenRouter

.env.example

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,9 @@ GROQ_API_KEY=
1919
# Mistral
2020
MISTRAL_API_KEY=
2121

22+
# OpenRouter
23+
OPENROUTER_API_KEY=
24+
2225
# Perplexity
2326
PERPLEXITY_API_KEY=
2427

README.md

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,10 @@
11
# Token.js
22

3-
Integrate 60+ LLMs with one TypeScript SDK using OpenAI's format. Free and open source. No proxy server required.
3+
Integrate 200+ LLMs with one TypeScript SDK using OpenAI's format. Free and open source. No proxy server required.
44

55
## Features
66

7-
* Use OpenAI's format to call 60+ LLMs from 9 providers.
7+
* Use OpenAI's format to call 200+ LLMs from 10 providers.
88
* Supports tools, JSON outputs, image inputs, streaming, and more.
99
* Runs completely on the client side. No proxy server needed.
1010
* Free and open source under MIT.
@@ -20,6 +20,7 @@ Integrate 60+ LLMs with one TypeScript SDK using OpenAI's format. Free and open
2020
* Mistral
2121
* OpenAI
2222
* Perplexity
23+
* OpenRouter
2324

2425
## [Documentation](https://docs.tokenjs.ai/)
2526

docs/README.md

Lines changed: 38 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,14 +1,14 @@
11
---
22
description: >-
3-
Integrate 60+ LLMs with one TypeScript SDK using OpenAI's format.
3+
Integrate 200+ LLMs with one TypeScript SDK using OpenAI's format.
44
Free and open source. No proxy server required.
55
---
66

77
# Token.js
88

99
## Features
1010

11-
* Use OpenAI's format to call 60+ LLMs from 9 providers.
11+
* Use OpenAI's format to call 200+ LLMs from 10 providers.
1212
* Supports tools, JSON outputs, image inputs, streaming, and more.
1313
* Runs completely on the client side. No proxy server needed.
1414
* Free and open source under MIT.
@@ -24,6 +24,7 @@ description: >-
2424
* Mistral
2525
* OpenAI
2626
* Perplexity
27+
* OpenRouter
2728

2829
## Setup
2930

@@ -261,6 +262,39 @@ async function main() {
261262
main()
262263
```
263264
{% endtab %}
265+
266+
{% tab title="OpenRouter" %}
267+
{% code title=".env" %}
268+
```bash
269+
OPENROUTER_API_KEY=<openrouter api key>
270+
```
271+
{% endcode %}
272+
273+
```typescript
274+
import { TokenJS } from 'token.js'
275+
276+
// Create the Token.js client
277+
const tokenjs = new TokenJS()
278+
279+
async function main() {
280+
// Create a model response
281+
const completion = await tokenjs.chat.completions.create({
282+
// Specify the provider and model
283+
provider: 'openrouter',
284+
model: 'nvidia/nemotron-4-340b-instruct',
285+
// Define your message
286+
messages: [
287+
{
288+
role: 'user',
289+
content: 'Hello!',
290+
},
291+
],
292+
})
293+
console.log(completion.choices[0])
294+
}
295+
main()
296+
```
297+
{% endtab %}
264298
{% endtabs %}
265299

266300
### Access Credentials
@@ -284,6 +318,8 @@ GROQ_API_KEY=
284318
MISTRAL_API_KEY=
285319
# Perplexity
286320
PERPLEXITY_API_KEY=
321+
# OpenRouter
322+
OPENROUTER_API_KEY=
287323
# AWS Bedrock
288324
AWS_REGION_NAME=
289325
AWS_ACCESS_KEY_ID=

docs/SUMMARY.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,7 @@
1010
* [Groq](providers/groq.md)
1111
* [Mistral](providers/mistral.md)
1212
* [OpenAI](providers/openai.md)
13+
* [OpenRouter](providers/openrouter.md)
1314
* [Perplexity](providers/perplexity.md)
1415
* [Contact Us](contact-us.md)
1516
* [Contributing](https://github.com/token-js/token.js/blob/main/CONTRIBUTING.md)

docs/providers/README.md

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -4,4 +4,4 @@ description: Integrate LLM providers and models using Token.js.
44

55
# Providers
66

7-
<table data-card-size="large" data-view="cards"><thead><tr><th></th><th data-hidden></th><th data-hidden></th><th data-hidden data-card-target data-type="content-ref"></th></tr></thead><tbody><tr><td>AI21</td><td></td><td></td><td><a href="ai21.md">ai21.md</a></td></tr><tr><td>Anthropic</td><td></td><td></td><td><a href="anthropic.md">anthropic.md</a></td></tr><tr><td>Bedrock</td><td></td><td></td><td><a href="bedrock.md">bedrock.md</a></td></tr><tr><td>Cohere</td><td></td><td></td><td><a href="cohere.md">cohere.md</a></td></tr><tr><td>Gemini</td><td></td><td></td><td><a href="gemini.md">gemini.md</a></td></tr><tr><td>Groq</td><td></td><td></td><td><a href="groq.md">groq.md</a></td></tr><tr><td>Mistral</td><td></td><td></td><td><a href="mistral.md">mistral.md</a></td></tr><tr><td>OpenAI</td><td></td><td></td><td><a href="openai.md">openai.md</a></td></tr><tr><td>Perplexity</td><td></td><td></td><td><a href="perplexity.md">perplexity.md</a></td></tr></tbody></table>
7+
<table data-card-size="large" data-view="cards"><thead><tr><th></th><th data-hidden></th><th data-hidden></th><th data-hidden data-card-target data-type="content-ref"></th></tr></thead><tbody><tr><td>AI21</td><td></td><td></td><td><a href="ai21.md">ai21.md</a></td></tr><tr><td>Anthropic</td><td></td><td></td><td><a href="anthropic.md">anthropic.md</a></td></tr><tr><td>Bedrock</td><td></td><td></td><td><a href="bedrock.md">bedrock.md</a></td></tr><tr><td>Cohere</td><td></td><td></td><td><a href="cohere.md">cohere.md</a></td></tr><tr><td>Gemini</td><td></td><td></td><td><a href="gemini.md">gemini.md</a></td></tr><tr><td>Groq</td><td></td><td></td><td><a href="groq.md">groq.md</a></td></tr><tr><td>Mistral</td><td></td><td></td><td><a href="mistral.md">mistral.md</a></td></tr><tr><td>OpenAI</td><td></td><td></td><td><a href="openai.md">openai.md</a></td></tr><tr><td>Perplexity</td><td></td><td></td><td><a href="perplexity.md">perplexity.md</a></td></tr><tr><td>OpenRouter</td><td></td><td></td><td><a href="openrouter.md">openrouter.md</a></td></tr></tbody></table>

docs/providers/openrouter.md

Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,44 @@
1+
# OpenRouter
2+
3+
[Get an OpenRouter API key](https://openrouter.ai/settings/keys)
4+
5+
## Usage
6+
7+
{% code title=".env" %}
8+
```bash
9+
OPENROUTER_API_KEY=
10+
```
11+
{% endcode %}
12+
13+
```typescript
14+
import { TokenJS } from 'token.js'
15+
16+
// Create the Token.js client
17+
const tokenjs = new TokenJS()
18+
19+
async function main() {
20+
// Create a model response
21+
const completion = await tokenjs.chat.completions.create({
22+
// Specify the provider and model
23+
provider: 'openrouter',
24+
model: 'nvidia/nemotron-4-340b-instruct',
25+
// Define your message
26+
messages: [
27+
{
28+
role: 'user',
29+
content: 'Hello!',
30+
},
31+
],
32+
})
33+
console.log(completion.choices[0])
34+
}
35+
main()
36+
```
37+
38+
## Compatibility
39+
OpenRouter supports more than 180 models from a variety of providers, which may have varying feature support. We recommend reviewing the OpenRouter and provider documentation for specific compatibility information.
40+
41+
## Additional Resources
42+
43+
* [Supported Models](https://openrouter.ai/models)
44+
* [OpenRouter Documentation](https://openrouter.ai/docs/quick-start)

scripts/docs/generate.ts

Lines changed: 15 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -14,10 +14,22 @@ const generateCompatibility = async () => {
1414

1515
let pushHeader = true
1616

17+
if (compatibility.generateDocs === false) {
18+
continue
19+
}
20+
21+
if (typeof compatibility.models === 'boolean') {
22+
throw new Error(
23+
'Auto-generating model compatibility tables is not supported for providers that do not have explicitly defined models.'
24+
)
25+
}
26+
1727
for (const model of compatibility.models) {
1828
const header: string[] = []
1929
const features: string[] = [model]
2030
for (const [feature, models] of Object.entries(compatibility)) {
31+
if (feature === 'generateDocs') continue
32+
2133
header.push(TableDisplayNames[feature])
2234

2335
if (feature === 'models') continue
@@ -40,7 +52,9 @@ const generateCompatibility = async () => {
4052
const mkdTable = markdownTable(table)
4153
const providerDocs = readFileSync(`docs/providers/${provider}.md`, 'utf-8')
4254
const docsSplit = providerDocs.split('<!-- compatibility -->')
43-
const afterCompatibilitySplit = docsSplit[1].split('<!-- end compatibility -->')
55+
const afterCompatibilitySplit = docsSplit[1].split(
56+
'<!-- end compatibility -->'
57+
)
4458

4559
const newDocs = `${docsSplit[0]}<!-- compatibility -->\n## Supported Models\n\n${mkdTable}\n\n${legend}<!-- end compatibility -->${afterCompatibilitySplit[1]}`
4660

src/chat/index.ts

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -17,6 +17,7 @@ export type BedrockModel = (typeof models.bedrock.models)[number]
1717
export type MistralModel = (typeof models.mistral.models)[number]
1818
export type PerplexityModel = (typeof models.perplexity.models)[number]
1919
export type GroqModel = (typeof models.groq.models)[number]
20+
export type OpenRouterModel = string
2021

2122
export type LLMChatModel =
2223
| OpenAIModel
@@ -28,6 +29,7 @@ export type LLMChatModel =
2829
| MistralModel
2930
| PerplexityModel
3031
| GroqModel
32+
| OpenRouterModel
3133

3234
export type LLMProvider = keyof typeof models
3335

@@ -41,6 +43,7 @@ type ProviderModelMap = {
4143
mistral: MistralModel
4244
perplexity: PerplexityModel
4345
groq: GroqModel
46+
openrouter: OpenRouterModel
4447
}
4548

4649
type CompletionBase<P extends LLMProvider> = Pick<

src/handlers/base.ts

Lines changed: 16 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -8,21 +8,21 @@ import { InputError } from './types.js'
88

99
export abstract class BaseHandler<T extends LLMChatModel> {
1010
opts: ConfigOptions
11-
protected models: readonly T[]
12-
protected supportsJSON: readonly T[]
13-
protected supportsImages: readonly T[]
14-
protected supportsToolCalls: readonly T[]
11+
protected models: readonly T[] | boolean
12+
protected supportsJSON: readonly T[] | boolean
13+
protected supportsImages: readonly T[] | boolean
14+
protected supportsToolCalls: readonly T[] | boolean
1515
protected supportsN: readonly T[] | boolean
16-
protected supportsStreamingMessages: readonly T[]
16+
protected supportsStreamingMessages: readonly T[] | boolean
1717

1818
constructor(
1919
opts: ConfigOptions,
20-
models: readonly T[],
21-
supportsJSON: readonly T[],
22-
supportsImages: readonly T[],
23-
supportsToolCalls: readonly T[],
20+
models: readonly T[] | boolean,
21+
supportsJSON: readonly T[] | boolean,
22+
supportsImages: readonly T[] | boolean,
23+
supportsToolCalls: readonly T[] | boolean,
2424
suportsN: readonly T[] | boolean,
25-
supportsStreamingMessages: readonly T[]
25+
supportsStreamingMessages: readonly T[] | boolean
2626
) {
2727
this.opts = opts
2828
this.models = models
@@ -38,6 +38,10 @@ export abstract class BaseHandler<T extends LLMChatModel> {
3838
): Promise<CompletionResponse | StreamCompletionResponse>
3939

4040
protected validateInputs(body: CompletionParams): void {
41+
// We remove the provider key from the body just in case the provider does validation which errors due to it.
42+
// This can only occur on OpenAI compatible providers, but we do it for all providers for consistency.
43+
delete (body as any).provider
44+
4145
if (!this.isSupportedModel(body.model)) {
4246
throw new InputError(`Invalid 'model' field: ${body.model}.`)
4347
}
@@ -149,8 +153,8 @@ export abstract class BaseHandler<T extends LLMChatModel> {
149153

150154
// We make this public so that we can mock it in tests, which is fine because the `BaseHandler`
151155
// class isn't exposed to the user.
152-
public isSupportedModel(model: LLMChatModel): model is T {
153-
return this.models.includes(model as T)
156+
public isSupportedModel(model: string): model is T {
157+
return this.isSupportedFeature(this.models, model as T)
154158
}
155159

156160
protected supportsJSONMode(model: T): boolean {

0 commit comments

Comments
 (0)