Initial Commit

This commit is contained in:
2025-06-27 22:45:20 -05:00
parent f35b07ecca
commit 6a5c84b609
9 changed files with 5884 additions and 2 deletions

BIN
.gitignore vendored Normal file

Binary file not shown.

View File

@@ -1,3 +1,76 @@
# gemini-cli-openai-api
# Gemini ↔︎ OpenAI Proxy
OpenAI API compatible proxy to expose gemini-cli models
Serve **Google Gemini 2.5 Pro** (or Flash) through an **OpenAI-compatible API**.
Plug-and-play with clients that already speak OpenAI—SillyTavern, llama.cpp, LangChain, the VS Code *Cline* extension, etc.
---
## ✨ Features
| ✔ | Feature | Notes |
|---|---------|-------|
| `/v1/chat/completions` | Non-stream & stream (SSE) | Works with curl, ST, LangChain… |
| Vision support | `image_url` → Gemini `inlineData` | |
| Function / Tool calling | OpenAI “functions” → Gemini Tool Registry | |
| Reasoning / chain-of-thought | Sends `enable_thoughts:true`, streams `<think>` chunks | ST shows grey bubbles |
| 1 M-token context | Proxy auto-lifts Gemini CLI's default 200 k cap | |
| CORS | Enabled (`*`) by default | Ready for browser apps |
| Zero external deps | Node 22 + TypeScript only | No Express |
---
## 🚀 Quick start (local)
```bash
git clone https://huggingface.co/engineofperplexity/gemini-openai-proxy
cd gemini-openai-proxy
npm ci # install deps & ts-node
# launch on port 11434
npx ts-node src/server.ts
Optional env vars
PORT=3000 — change listen port
GEMINI_API_KEY=<key> — use your own key
Minimal curl test
bash
Copy
Edit
curl -X POST http://localhost:11434/v1/chat/completions \
-H "Content-Type: application/json" \
-d '{
"model": "gemini-2.5-pro-latest",
"messages":[{"role":"user","content":"Hello Gemini!"}]
}'
SillyTavern settings
Field Value
API Base URL http://127.0.0.1:11434/v1
Model gemini-2.5-pro-latest
Streaming On
Reasoning On → grey <think> lines appear
🐳 Docker
bash
Copy
Edit
# build once
docker build -t gemini-openai-proxy .
# run
docker run -p 11434:11434 \
-e GEMINI_API_KEY=$GEMINI_API_KEY \
gemini-openai-proxy
🗂 Project layout
pgsql
Copy
Edit
src/
server.ts minimalist HTTP server
mapper.ts OpenAI ⇄ Gemini transforms
chatwrapper.ts thin wrapper around @google/genai
remoteimage.ts fetch + base64 for vision
package.json deps & scripts
Dockerfile
README.md
📜 License
MIT — free for personal & commercial use.

5452
package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

20
package.json Normal file
View File

@@ -0,0 +1,20 @@
{
"name": "gemini-cli-openai-api",
"version": "1.0.0",
"main": "server.ts",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"keywords": [],
"author": "Stefano Fiorini",
"license": "MIT",
"description": "",
"dependencies": {
"@google/gemini-cli": "^0.1.3"
},
"devDependencies": {
"@types/node": "^24.0.4",
"ts-node": "^10.9.2",
"typescript": "^5.8.3"
}
}

69
src/chatwrapper.ts Normal file
View File

@@ -0,0 +1,69 @@
// src/chatwrapper.ts
import {
AuthType,
createContentGeneratorConfig,
createContentGenerator,
} from '@google/gemini-cli-core/dist/src/core/contentGenerator.js';
/* ------------------------------------------------------------------ */
/* 1. Build the ContentGenerator exactly like the CLI does */
/* ------------------------------------------------------------------ */
let modelName: string; // we'll fill this once
const generatorPromise = (async () => {
// Pass undefined for model so the helper falls back to DEFAULT_GEMINI_MODEL
const cfg = await createContentGeneratorConfig(
undefined, // let helper pick default (Gemini-2.5-Pro)
AuthType.LOGIN_WITH_GOOGLE_PERSONAL // same mode the CLI defaults to
);
modelName = cfg.model; // remember the actual model string
return await createContentGenerator(cfg);
})();
/* ------------------------------------------------------------------ */
/* 2. Helpers consumed by server.ts */
/* ------------------------------------------------------------------ */
type GenConfig = Record<string, unknown>;
export async function sendChat({
contents,
generationConfig = {},
}: {
contents: any[];
generationConfig?: GenConfig;
tools?: unknown; // accepted but ignored for now
}) {
const generator: any = await generatorPromise;
return await generator.generateContent({
model: modelName,
contents,
config: generationConfig,
});
}
export async function* sendChatStream({
contents,
generationConfig = {},
}: {
contents: any[];
generationConfig?: GenConfig;
tools?: unknown;
}) {
const generator: any = await generatorPromise;
const stream = await generator.generateContentStream({
model: modelName,
contents,
config: generationConfig,
});
for await (const chunk of stream) yield chunk;
}
/* ------------------------------------------------------------------ */
/* 3. Minimal stubs so server.ts compiles (extend later) */
/* ------------------------------------------------------------------ */
export function listModels() {
return [{ id: modelName }];
}
export async function embed(_input: unknown) {
throw new Error('Embeddings endpoint not implemented yet.');
}

124
src/mapper.ts Normal file
View File

@@ -0,0 +1,124 @@
/* ------------------------------------------------------------------ */
/* mapper.ts OpenAI ⇆ Gemini (with reasoning/1 M context) */
/* ------------------------------------------------------------------ */
import { fetchAndEncode } from './remoteimage';
import { z } from 'zod';
import { ToolRegistry } from '@google/gemini-cli-core/dist/src/tools/tool-registry.js';
/* ------------------------------------------------------------------ */
type Part = { text?: string; inlineData?: { mimeType: string; data: string } };
/* ------------------------------------------------------------------ */
async function callLocalFunction(_name: string, _args: unknown) {
return { ok: true };
}
/* ================================================================== */
/* Request mapper: OpenAI ➞ Gemini */
/* ================================================================== */
export async function mapRequest(body: any) {
const parts: Part[] = [];
/* ---- convert messages & vision --------------------------------- */
for (const m of body.messages) {
if (Array.isArray(m.content)) {
for (const item of m.content) {
if (item.type === 'image_url') {
parts.push({ inlineData: await fetchAndEncode(item.image_url.url) });
} else if (item.type === 'text') {
parts.push({ text: item.text });
}
}
} else {
parts.push({ text: m.content });
}
}
/* ---- base generationConfig ------------------------------------- */
const generationConfig: Record<string, unknown> = {
temperature: body.temperature,
maxOutputTokens: body.max_tokens,
topP: body.top_p,
...(body.generationConfig ?? {}), // copy anything ST already merged
};
if (body.include_reasoning === true) {
generationConfig.enable_thoughts = true; // ← current flag
generationConfig.thinking_budget ??= 2048; // optional limit
}
/* ---- auto-enable reasoning & 1 M context ----------------------- */
if (body.include_reasoning === true && generationConfig.thinking !== true) {
generationConfig.thinking = true;
generationConfig.thinking_budget ??= 2048;
}
generationConfig.maxInputTokens ??= 1_000_000; // lift context cap
const geminiReq = {
contents: [{ role: 'user', parts }],
generationConfig,
stream: body.stream,
};
/* ---- Tool / function mapping ----------------------------------- */
const tools = new ToolRegistry({} as any);
if (body.functions?.length) {
const reg = tools as any;
body.functions.forEach((fn: any) =>
reg.registerTool(
fn.name,
{
title: fn.name,
description: fn.description ?? '',
inputSchema: z.object(fn.parameters?.properties ?? {}),
},
async (args: unknown) => callLocalFunction(fn.name, args),
),
);
}
return { geminiReq, tools };
}
/* ================================================================== */
/* Non-stream response: Gemini ➞ OpenAI */
/* ================================================================== */
export function mapResponse(gResp: any) {
const usage = gResp.usageMetadata ?? {};
return {
id: `chatcmpl-${Date.now()}`,
object: 'chat.completion',
created: Math.floor(Date.now() / 1000),
model: 'gemini-2.5-pro-latest',
choices: [
{
index: 0,
message: { role: 'assistant', content: gResp.text },
finish_reason: 'stop',
},
],
usage: {
prompt_tokens: usage.promptTokens ?? 0,
completion_tokens: usage.candidatesTokens ?? 0,
total_tokens: usage.totalTokens ?? 0,
},
};
}
/* ================================================================== */
/* Stream chunk mapper: Gemini ➞ OpenAI */
/* ================================================================== */
export function mapStreamChunk(chunk: any) {
const part = chunk?.candidates?.[0]?.content?.parts?.[0] ?? {};
const delta: any = { role: 'assistant' };
if (part.thought === true) {
delta.content = `<think>${part.text ?? ''}`; // ST renders grey bubble
} else if (typeof part.text === 'string') {
delta.content = part.text;
}
return { choices: [ { delta, index: 0 } ] };
}

9
src/remoteimage.ts Normal file
View File

@@ -0,0 +1,9 @@
import { fetch } from 'undici'; // Node ≥18 has global fetch; otherwise add undici
export async function fetchAndEncode(url: string) {
const res = await fetch(url);
if (!res.ok) throw new Error(`Failed to fetch image: ${url}`);
const buf = Buffer.from(await res.arrayBuffer());
const mimeType = res.headers.get('content-type') || 'image/png';
return { mimeType, data: buf.toString('base64') };
}

118
src/server.ts Normal file
View File

@@ -0,0 +1,118 @@
import http from 'http';
import { sendChat, sendChatStream } from './chatwrapper';
import { mapRequest, mapResponse, mapStreamChunk } from './mapper';
/* ── basic config ─────────────────────────────────────────────────── */
const PORT = Number(process.env.PORT ?? 11434);
/* ── CORS helper ──────────────────────────────────────────────────── */
function allowCors(res: http.ServerResponse) {
res.setHeader('Access-Control-Allow-Origin', '*');
res.setHeader('Access-Control-Allow-Headers', '*');
res.setHeader('Access-Control-Allow-Methods', 'GET,POST,OPTIONS');
}
/* ── JSON body helper ─────────────────────────────────────────────── */
function readJSON(
req: http.IncomingMessage,
res: http.ServerResponse,
): Promise<any | null> {
return new Promise((resolve) => {
let data = '';
req.on('data', (c) => (data += c));
req.on('end', () => {
if (!data) {
if (req.method === 'POST') {
res.writeHead(400, { 'Content-Type': 'application/json' });
res.end(
JSON.stringify({
error: { message: 'Request body is missing for POST request' },
}),
);
}
return resolve(null);
}
try {
resolve(JSON.parse(data));
} catch {
res.writeHead(400, { 'Content-Type': 'application/json' }); // malformed JSON
res.end(JSON.stringify({ error: { message: 'Malformed JSON' } }));
resolve(null);
}
});
});
}
/* ── server ───────────────────────────────────────────────────────── */
http
.createServer(async (req, res) => {
allowCors(res);
const url = new URL(req.url ?? '/', `http://${req.headers.host}`);
const pathname = url.pathname.replace(/\/$/, '') || '/';
console.log(`[proxy] ${req.method} ${url.pathname}`);
/* -------- pre-flight ---------- */
if (req.method === 'OPTIONS') {
res.writeHead(204).end();
return;
}
/* -------- /v1/models ---------- */
if (pathname === "/v1/models" || pathname === "/models") {
res.writeHead(200, { "Content-Type": "application/json" });
res.end(
JSON.stringify({
data: [
{
id: "gemini-2.5-pro",
object: "model",
owned_by: "google",
},
],
})
);
return;
}
/* ---- /v1/chat/completions ---- */
if (
(pathname === "/chat/completions" ||
(pathname === "/v1/chat/completions" ) && req.method === "POST")
) {
const body = await readJSON(req, res);
console.log("Request body:", body);
if (!body) return;
try {
const { geminiReq, tools } = await mapRequest(body);
console.log("Mapped Gemini request:", geminiReq);
if (body.stream) {
res.writeHead(200, {
"Content-Type": "text/event-stream",
"Cache-Control": "no-cache",
Connection: "keep-alive",
});
for await (const chunk of sendChatStream({ ...geminiReq, tools })) {
console.log("Stream chunk:", chunk);
res.write(`data: ${JSON.stringify(mapStreamChunk(chunk))}\n\n`);
}
res.end("data: [DONE]\n\n");
} else {
const gResp = await sendChat({ ...geminiReq, tools });
res.writeHead(200, { "Content-Type": "application/json" });
res.end(JSON.stringify(mapResponse(gResp)));
}
} catch (err: any) {
console.error("Proxy error ➜", err);
res.writeHead(500, { "Content-Type": "application/json" });
res.end(JSON.stringify({ error: { message: err.message } }));
}
return;
}
/* ---- anything else ---------- */
res.writeHead(404).end();
})
.listen(PORT, () => console.log(`OpenAI proxy on :${PORT}`));

17
tsconfig.json Normal file
View File

@@ -0,0 +1,17 @@
{
"compilerOptions": {
/* ---- target & module ---- */
"target": "ES2020",
"module": "commonjs",
"moduleResolution": "node",
/* ---- quality-of-life ---- */
"esModuleInterop": true,
"skipLibCheck": true,
"forceConsistentCasingInFileNames": true,
/* ---- output dir ---- */
"outDir": "dist"
},
"include": ["src/**/*"]
}