- 集成 OpenAI 兼容网关和 Ollama 原生 API 接口
- 新增 AI 测试页面支持流式对话和模型切换
- 配置开发环境同源反向代理解决浏览器 CORS 问题
- 添加环境变量配置支持 AI API 和 Ollama 接口设置
- 实现聊天历史记录、中断请求和参数调节功能
- 提供 Nginx 反向代理配置文档用于生产环境部署
174 lines · 4.5 KiB · TypeScript
import { AI_API_URL } from '@/config/setting';
|
|
|
|
/** Chat roles accepted by OpenAI-compatible `/chat/completions` endpoints. */
export type OpenAIRole = 'system' | 'user' | 'assistant' | 'tool';
/** One message in a chat conversation sent to `/chat/completions`. */
export interface OpenAIChatMessage {
  /** Author of the message. */
  role: OpenAIRole;
  /** Plain-text content of the message. */
  content: string;
  /** Optional participant name (part of the OpenAI message schema). */
  name?: string;
}
/** A model entry as returned by the OpenAI-compatible `GET /models` endpoint. */
export interface OpenAIModel {
  /** Model identifier used in chat requests. */
  id: string;
  /** Object type tag (typically "model"). */
  object: string;
  /** Creation timestamp (Unix seconds) — not all gateways provide it. */
  created?: number;
  /** Owning organization — not all gateways provide it. */
  owned_by?: string;
  /** Optional human-readable name (some gateways add this field). */
  name?: string;
}
/** Response envelope of `GET /models`. */
export interface OpenAIListModelsResponse {
  /** Available models. */
  data: OpenAIModel[];
}
/** Request body for `POST /chat/completions` (OpenAI-compatible subset). */
export interface OpenAIChatCompletionRequest {
  /** Model id to use (see {@link listModels}). */
  model: string;
  /** Conversation history, oldest first. */
  messages: OpenAIChatMessage[];
  /** Sampling temperature. */
  temperature?: number;
  /** Nucleus-sampling cutoff. */
  top_p?: number;
  /** Upper bound on generated tokens. */
  max_tokens?: number;
  /** When true the server responds with an SSE token stream. */
  stream?: boolean;
}
/**
 * One completion choice. Non-streaming responses populate `message`;
 * streaming (SSE) chunks populate `delta` instead.
 */
export interface OpenAIChatCompletionChoice {
  /** Position of this choice in the response. */
  index: number;
  /** Full message (non-streaming responses). */
  message?: { role: OpenAIRole; content: string };
  /** Incremental fragment (streaming chunks). */
  delta?: { role?: OpenAIRole; content?: string };
  /** Why generation stopped (e.g. "stop"), or null while streaming. */
  finish_reason?: string | null;
}
/**
 * Response body of `POST /chat/completions`. Also used to type individual
 * SSE chunks, hence most envelope fields are optional.
 */
export interface OpenAIChatCompletionResponse {
  /** Completion id. */
  id?: string;
  /** Object type tag. */
  object?: string;
  /** Creation timestamp (Unix seconds). */
  created?: number;
  /** Model that produced the completion. */
  model?: string;
  /** Generated choices (usually a single element). */
  choices: OpenAIChatCompletionChoice[];
}
function getHeaders(apiKey?: string) {
|
|
const headers: Record<string, string> = {
|
|
'Content-Type': 'application/json'
|
|
};
|
|
if (apiKey) {
|
|
const trimmed = apiKey.trim();
|
|
// Accept either raw token or "Bearer xxx".
|
|
headers.Authorization = /^bearer\s+/i.test(trimmed)
|
|
? trimmed
|
|
: `Bearer ${trimmed}`;
|
|
}
|
|
return headers;
|
|
}
|
|
|
|
function normalizeBaseURL(baseURL: string) {
|
|
return baseURL.replace(/\/+$/, '');
|
|
}
|
|
|
|
export async function listModels(opts?: { apiKey?: string; baseURL?: string }) {
|
|
const baseURL = normalizeBaseURL(opts?.baseURL ?? AI_API_URL);
|
|
const res = await fetch(`${baseURL}/models`, {
|
|
method: 'GET',
|
|
headers: getHeaders(opts?.apiKey)
|
|
});
|
|
if (!res.ok) {
|
|
const text = await res.text().catch(() => '');
|
|
throw new Error(
|
|
`listModels failed: ${res.status} ${res.statusText}${text ? ` - ${text}` : ''}`
|
|
);
|
|
}
|
|
return (await res.json()) as OpenAIListModelsResponse;
|
|
}
|
|
|
|
export async function chatCompletions(
|
|
body: OpenAIChatCompletionRequest,
|
|
opts?: { apiKey?: string; baseURL?: string; signal?: AbortSignal }
|
|
) {
|
|
const baseURL = normalizeBaseURL(opts?.baseURL ?? AI_API_URL);
|
|
const res = await fetch(`${baseURL}/chat/completions`, {
|
|
method: 'POST',
|
|
headers: getHeaders(opts?.apiKey),
|
|
body: JSON.stringify(body),
|
|
signal: opts?.signal
|
|
});
|
|
if (!res.ok) {
|
|
const text = await res.text().catch(() => '');
|
|
throw new Error(
|
|
`chatCompletions failed: ${res.status} ${res.statusText}${text ? ` - ${text}` : ''}`
|
|
);
|
|
}
|
|
return (await res.json()) as OpenAIChatCompletionResponse;
|
|
}
|
|
|
|
/**
|
|
* Stream OpenAI-compatible SSE (`stream: true`) and emit incremental tokens.
|
|
* Most gateways (Open-WebUI / LiteLLM / Ollama OpenAI proxy) follow:
|
|
* data: { choices: [{ delta: { content: "..." } }] }
|
|
* data: [DONE]
|
|
*/
|
|
export async function chatCompletionsStream(
|
|
body: Omit<OpenAIChatCompletionRequest, 'stream'>,
|
|
opts: {
|
|
apiKey?: string;
|
|
baseURL?: string;
|
|
signal?: AbortSignal;
|
|
onDelta: (text: string) => void;
|
|
onDone?: () => void;
|
|
}
|
|
) {
|
|
const baseURL = normalizeBaseURL(opts.baseURL ?? AI_API_URL);
|
|
const res = await fetch(`${baseURL}/chat/completions`, {
|
|
method: 'POST',
|
|
headers: getHeaders(opts.apiKey),
|
|
body: JSON.stringify({ ...body, stream: true }),
|
|
signal: opts.signal
|
|
});
|
|
|
|
if (!res.ok || !res.body) {
|
|
const text = await res.text().catch(() => '');
|
|
throw new Error(
|
|
`chatCompletionsStream failed: ${res.status} ${res.statusText}${text ? ` - ${text}` : ''}`
|
|
);
|
|
}
|
|
|
|
const reader = res.body.getReader();
|
|
const decoder = new TextDecoder('utf-8');
|
|
let buffer = '';
|
|
|
|
while (true) {
|
|
const { value, done } = await reader.read();
|
|
if (done) break;
|
|
buffer += decoder.decode(value, { stream: true });
|
|
|
|
// SSE events are separated by blank lines.
|
|
let idx = buffer.indexOf('\n\n');
|
|
while (idx !== -1) {
|
|
const rawEvent = buffer.slice(0, idx);
|
|
buffer = buffer.slice(idx + 2);
|
|
idx = buffer.indexOf('\n\n');
|
|
|
|
const lines = rawEvent
|
|
.split('\n')
|
|
.map((l) => l.trim())
|
|
.filter(Boolean);
|
|
for (const line of lines) {
|
|
if (!line.startsWith('data:')) continue;
|
|
const data = line.slice(5).trim();
|
|
if (!data) continue;
|
|
if (data === '[DONE]') {
|
|
opts.onDone?.();
|
|
return;
|
|
}
|
|
try {
|
|
const json = JSON.parse(data) as OpenAIChatCompletionResponse;
|
|
const delta = json.choices?.[0]?.delta?.content;
|
|
if (delta) {
|
|
opts.onDelta(delta);
|
|
}
|
|
} catch {
|
|
// Ignore malformed chunks
|
|
}
|
|
}
|
|
}
|
|
}
|
|
|
|
opts.onDone?.();
|
|
}
|