feat(ai): 添加 AI 助手功能支持多模型对话

- 集成 OpenAI 兼容网关和 Ollama 原生 API 接口
- 新增 AI 测试页面支持流式对话和模型切换
- 配置开发环境同源反向代理解决浏览器 CORS 问题
- 添加环境变量配置支持 AI API 和 Ollama 接口设置
- 实现聊天历史记录、中断请求和参数调节功能
- 提供 Nginx 反向代理配置文档用于生产环境部署
This commit is contained in:
2026-02-27 22:15:41 +08:00
parent acec6570e1
commit b40326c3a9
9 changed files with 832 additions and 3 deletions

View File

@@ -4,3 +4,13 @@ VITE_APP_NAME=后台管理(开发环境)
#VITE_API_URL=https://cms-api.s209.websoft.top/api #VITE_API_URL=https://cms-api.s209.websoft.top/api
# AI 网关(开发环境建议走同源反代,避免浏览器 CORS)
VITE_AI_API_URL=/ai-proxy
# Ollama 原生接口(开发环境建议走同源反代,避免浏览器 CORS)
VITE_OLLAMA_API_URL=/ollama-proxy
# 如果 AI 网关启用了鉴权(401 Not authenticated),填入你的 Key(仅供本机 dev server 使用)
# 不要加 VITE_ 前缀,避免被打包进前端
# AI_API_KEY=your_ai_api_key

View File

@@ -6,6 +6,19 @@ VITE_API_URL=https://your-api.com/api
VITE_SERVER_API_URL=https://your-server.com/api VITE_SERVER_API_URL=https://your-server.com/api
VITE_DOMAIN=https://your-domain.com VITE_DOMAIN=https://your-domain.com
VITE_FILE_SERVER=https://your-file-server.com VITE_FILE_SERVER=https://your-file-server.com
# AI 网关(OpenAI兼容)
# - 开发环境推荐走同源反代VITE_AI_API_URL=/ai-proxy配合 vite.config.ts
# - 生产环境可直连(需 AI 服务允许 CORS或在 Nginx 里配置 /ai-proxy 反代
VITE_AI_API_URL=https://ai-api.websoft.top/api/v1
# Ollama 原生接口(默认端口 11434)
# - 开发环境推荐走同源反代VITE_OLLAMA_API_URL=/ollama-proxy配合 vite.config.ts
# - 生产环境不要直接用 http会混合内容被拦截建议 Nginx 反代成同源 https
VITE_OLLAMA_API_URL=http://47.119.165.234:11434
# 仅用于本地开发反代注入vite.config.ts 会读取并注入到 /ai-proxy 请求头)
# 不要加 VITE_ 前缀,避免被打包到前端产物里
AI_API_KEY=your_ai_api_key
# 租户配置 # 租户配置
VITE_TENANT_ID=your_tenant_id VITE_TENANT_ID=your_tenant_id

76
docs/AI_PROXY_NGINX.md Normal file
View File

@@ -0,0 +1,76 @@
# AI /ai-proxy Nginx 反代示例
前端页面 `src/views/ai/index.vue` 默认在开发环境使用 `VITE_AI_API_URL=/ai-proxy`,通过同源反代解决浏览器 CORS。
## 1) Vite 开发环境
项目已在 `vite.config.ts` 配置(默认目标可通过 `AI_PROXY_TARGET` 调整):
- `/ai-proxy/*` -> `https://ai-api.websoft.top/api/v1/*`
配合 `.env.development`
```bash
VITE_AI_API_URL=/ai-proxy
```
## 2) 生产环境Nginx 反代)
如果你的生产站点是 Nginx 托管静态文件,建议也加一条同源反代:
```nginx
location /ai-proxy/ {
proxy_pass https://ai-api.websoft.top/api/v1/;
proxy_http_version 1.1;
proxy_set_header Host ai-api.websoft.top;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
# SSE/流式输出建议关闭缓存与缓冲
proxy_buffering off;
proxy_cache off;
# 如果你的 AI 网关开启了鉴权401 Not authenticated可以在反代层固定注入
# proxy_set_header Authorization "Bearer YOUR_AI_API_KEY";
}
```
然后把生产环境的 `VITE_AI_API_URL` 配置为:
```bash
VITE_AI_API_URL=/ai-proxy
```
## 2.1) Ollama 原生接口Nginx 反代)
如果你要直接用原生 Ollama`http://<host>:11434`),生产环境同样建议走同源反代(避免 CORS + https 混合内容):
```nginx
location /ollama-proxy/ {
proxy_pass http://47.119.165.234:11434/;
proxy_http_version 1.1;
proxy_set_header Host 47.119.165.234;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_buffering off;
proxy_cache off;
}
```
然后把 `VITE_OLLAMA_API_URL` 配置为:
```bash
VITE_OLLAMA_API_URL=/ollama-proxy
```
## 3) 关于 API Key
不建议把 Key 放在浏览器里。
推荐做法:
- Key 放在你自己的后端(或 Nginx里统一注入 / 鉴权;
- 前端只请求同源 `/ai-proxy/*`

146
src/api/ai/ollama.ts Normal file
View File

@@ -0,0 +1,146 @@
import { OLLAMA_API_URL } from '@/config/setting';
/** Message author roles accepted by the Ollama chat API. */
export type OllamaRole = 'system' | 'user' | 'assistant' | 'tool';

/** One chat turn in the Ollama native format. */
export interface OllamaChatMessage {
  role: OllamaRole;
  content: string;
}

/** A single model entry returned by `GET /api/tags`. */
export interface OllamaModelTag {
  name: string;
  modified_at?: string;
  size?: number;
  digest?: string;
  details?: any;
}

/** Response envelope of `GET /api/tags`. */
export interface OllamaTagsResponse {
  models: OllamaModelTag[];
}

/** Request body for `POST /api/chat`. */
export interface OllamaChatRequest {
  model: string;
  messages: OllamaChatMessage[];
  stream?: boolean;
  options?: {
    temperature?: number;
    top_p?: number;
    num_predict?: number;
  };
}

/**
 * One chunk of a chat response. For non-streaming requests this is the
 * whole reply; for streaming requests one object arrives per NDJSON line.
 */
export interface OllamaChatResponseChunk {
  model?: string;
  created_at?: string;
  message?: { role: OllamaRole; content: string };
  done?: boolean;
  error?: string;
}

/** Strip trailing slashes so URL joining below never produces `//`. */
function normalizeBaseURL(baseURL: string) {
  return baseURL.replace(/\/+$/, '');
}

/**
 * List locally available models via the native Ollama API (`GET /api/tags`).
 *
 * @param opts.baseURL override for the Ollama endpoint (defaults to OLLAMA_API_URL)
 * @throws Error containing the HTTP status and (best effort) response text
 */
export async function listOllamaModels(opts?: { baseURL?: string }) {
  const baseURL = normalizeBaseURL(opts?.baseURL ?? OLLAMA_API_URL);
  const res = await fetch(`${baseURL}/api/tags`, { method: 'GET' });
  if (!res.ok) {
    // Read the body best-effort so the error message carries server detail.
    const text = await res.text().catch(() => '');
    throw new Error(
      `listOllamaModels failed: ${res.status} ${res.statusText}${text ? ` - ${text}` : ''}`
    );
  }
  return (await res.json()) as OllamaTagsResponse;
}

/**
 * Non-streaming chat request (`POST /api/chat`).
 * Any `stream` flag in `body` is overridden with `false`.
 */
export async function ollamaChat(
  body: OllamaChatRequest,
  opts?: { baseURL?: string; signal?: AbortSignal }
) {
  const baseURL = normalizeBaseURL(opts?.baseURL ?? OLLAMA_API_URL);
  const res = await fetch(`${baseURL}/api/chat`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ ...body, stream: false }),
    signal: opts?.signal
  });
  if (!res.ok) {
    const text = await res.text().catch(() => '');
    throw new Error(
      `ollamaChat failed: ${res.status} ${res.statusText}${text ? ` - ${text}` : ''}`
    );
  }
  return (await res.json()) as OllamaChatResponseChunk;
}

/**
 * Streaming chat. Ollama native streaming is newline-delimited JSON
 * objects (NDJSON), not SSE.
 *
 * Fix vs. previous revision: data still sitting in `buffer` when the
 * stream closes (a final object without a trailing newline) is now parsed
 * instead of being silently dropped, and the TextDecoder is flushed so a
 * trailing partial multi-byte sequence is not lost.
 *
 * @param opts.onDelta called with each incremental piece of assistant text
 * @param opts.onDone  called exactly once when the stream completes
 */
export async function ollamaChatStream(
  body: Omit<OllamaChatRequest, 'stream'>,
  opts: {
    baseURL?: string;
    signal?: AbortSignal;
    onDelta: (text: string) => void;
    onDone?: () => void;
  }
) {
  const baseURL = normalizeBaseURL(opts.baseURL ?? OLLAMA_API_URL);
  const res = await fetch(`${baseURL}/api/chat`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ ...body, stream: true }),
    signal: opts.signal
  });
  if (!res.ok || !res.body) {
    const text = await res.text().catch(() => '');
    throw new Error(
      `ollamaChatStream failed: ${res.status} ${res.statusText}${text ? ` - ${text}` : ''}`
    );
  }

  // Parse one NDJSON line; returns true when the server signaled `done`.
  const handleLine = (raw: string): boolean => {
    const line = raw.trim();
    if (!line) return false;
    let json: OllamaChatResponseChunk;
    try {
      json = JSON.parse(line);
    } catch {
      // Ignore garbled lines rather than aborting the whole stream.
      return false;
    }
    if (json.error) {
      throw new Error(json.error);
    }
    const delta = json.message?.content;
    if (delta) {
      opts.onDelta(delta);
    }
    return json.done === true;
  };

  const reader = res.body.getReader();
  const decoder = new TextDecoder('utf-8');
  let buffer = '';
  while (true) {
    const { value, done } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    let idx = buffer.indexOf('\n');
    while (idx !== -1) {
      const line = buffer.slice(0, idx);
      buffer = buffer.slice(idx + 1);
      idx = buffer.indexOf('\n');
      if (handleLine(line)) {
        opts.onDone?.();
        return;
      }
    }
  }
  // Flush the decoder and process any final line that arrived without a
  // trailing newline (previously this data was silently discarded).
  buffer += decoder.decode();
  for (const line of buffer.split('\n')) {
    if (handleLine(line)) break;
  }
  opts.onDone?.();
}

173
src/api/ai/openai.ts Normal file
View File

@@ -0,0 +1,173 @@
import { AI_API_URL } from '@/config/setting';
/** Message author roles accepted by OpenAI-compatible chat APIs. */
export type OpenAIRole = 'system' | 'user' | 'assistant' | 'tool';

/** One chat turn in the OpenAI format. */
export interface OpenAIChatMessage {
  role: OpenAIRole;
  content: string;
  name?: string;
}

/** A model entry from `GET /models`. */
export interface OpenAIModel {
  id: string;
  object: string;
  created?: number;
  owned_by?: string;
  name?: string;
}

/** Response envelope of `GET /models`. */
export interface OpenAIListModelsResponse {
  data: OpenAIModel[];
}

/** Request body for `POST /chat/completions`. */
export interface OpenAIChatCompletionRequest {
  model: string;
  messages: OpenAIChatMessage[];
  temperature?: number;
  top_p?: number;
  max_tokens?: number;
  stream?: boolean;
}

/**
 * One choice of a completion. Non-streaming responses populate `message`;
 * streaming (SSE) chunks populate `delta`.
 */
export interface OpenAIChatCompletionChoice {
  index: number;
  message?: { role: OpenAIRole; content: string };
  delta?: { role?: OpenAIRole; content?: string };
  finish_reason?: string | null;
}

/** Response / SSE-chunk envelope of `POST /chat/completions`. */
export interface OpenAIChatCompletionResponse {
  id?: string;
  object?: string;
  created?: number;
  model?: string;
  choices: OpenAIChatCompletionChoice[];
}

/**
 * Build request headers. The API key may be a raw token or a full
 * "Bearer xxx" value; both are normalized to a valid Authorization header.
 */
function getHeaders(apiKey?: string) {
  const headers: Record<string, string> = {
    'Content-Type': 'application/json'
  };
  if (apiKey) {
    const trimmed = apiKey.trim();
    // Accept either raw token or "Bearer xxx".
    headers.Authorization = /^bearer\s+/i.test(trimmed)
      ? trimmed
      : `Bearer ${trimmed}`;
  }
  return headers;
}

/** Strip trailing slashes so URL joining below never produces `//`. */
function normalizeBaseURL(baseURL: string) {
  return baseURL.replace(/\/+$/, '');
}

/**
 * List available models (`GET /models`).
 * @throws Error containing the HTTP status and (best effort) response text
 */
export async function listModels(opts?: { apiKey?: string; baseURL?: string }) {
  const baseURL = normalizeBaseURL(opts?.baseURL ?? AI_API_URL);
  const res = await fetch(`${baseURL}/models`, {
    method: 'GET',
    headers: getHeaders(opts?.apiKey)
  });
  if (!res.ok) {
    const text = await res.text().catch(() => '');
    throw new Error(
      `listModels failed: ${res.status} ${res.statusText}${text ? ` - ${text}` : ''}`
    );
  }
  return (await res.json()) as OpenAIListModelsResponse;
}

/**
 * Non-streaming chat completion (`POST /chat/completions`).
 * Use chatCompletionsStream for token-by-token output.
 */
export async function chatCompletions(
  body: OpenAIChatCompletionRequest,
  opts?: { apiKey?: string; baseURL?: string; signal?: AbortSignal }
) {
  const baseURL = normalizeBaseURL(opts?.baseURL ?? AI_API_URL);
  const res = await fetch(`${baseURL}/chat/completions`, {
    method: 'POST',
    headers: getHeaders(opts?.apiKey),
    body: JSON.stringify(body),
    signal: opts?.signal
  });
  if (!res.ok) {
    const text = await res.text().catch(() => '');
    throw new Error(
      `chatCompletions failed: ${res.status} ${res.statusText}${text ? ` - ${text}` : ''}`
    );
  }
  return (await res.json()) as OpenAIChatCompletionResponse;
}

/**
 * Stream OpenAI-compatible SSE (`stream: true`) and emit incremental tokens.
 * Most gateways (Open-WebUI / LiteLLM / Ollama OpenAI proxy) follow:
 *   data: { choices: [{ delta: { content: "..." } }] }
 *   data: [DONE]
 *
 * Fixes vs. previous revision:
 * - events framed with CRLF blank lines (`\r\n\r\n`) are now recognized;
 * - a final event still in the buffer when the stream closes without
 *   `[DONE]` is flushed instead of being silently dropped.
 *
 * @param opts.onDelta called with each incremental piece of assistant text
 * @param opts.onDone  called exactly once when the stream completes
 */
export async function chatCompletionsStream(
  body: Omit<OpenAIChatCompletionRequest, 'stream'>,
  opts: {
    apiKey?: string;
    baseURL?: string;
    signal?: AbortSignal;
    onDelta: (text: string) => void;
    onDone?: () => void;
  }
) {
  const baseURL = normalizeBaseURL(opts.baseURL ?? AI_API_URL);
  const res = await fetch(`${baseURL}/chat/completions`, {
    method: 'POST',
    headers: getHeaders(opts.apiKey),
    body: JSON.stringify({ ...body, stream: true }),
    signal: opts.signal
  });
  if (!res.ok || !res.body) {
    const text = await res.text().catch(() => '');
    throw new Error(
      `chatCompletionsStream failed: ${res.status} ${res.statusText}${text ? ` - ${text}` : ''}`
    );
  }

  // Parse one SSE event; returns true when the "[DONE]" sentinel arrives.
  const handleEvent = (rawEvent: string): boolean => {
    const lines = rawEvent
      .split('\n')
      .map((l) => l.trim())
      .filter(Boolean);
    for (const line of lines) {
      if (!line.startsWith('data:')) continue;
      const data = line.slice(5).trim();
      if (!data) continue;
      if (data === '[DONE]') return true;
      try {
        const json = JSON.parse(data) as OpenAIChatCompletionResponse;
        const delta = json.choices?.[0]?.delta?.content;
        if (delta) {
          opts.onDelta(delta);
        }
      } catch {
        // Ignore malformed chunks
      }
    }
    return false;
  };

  const reader = res.body.getReader();
  const decoder = new TextDecoder('utf-8');
  // SSE events are separated by a blank line; accept LF and CRLF framing.
  const separator = /\r?\n\r?\n/;
  let buffer = '';
  while (true) {
    const { value, done } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    let match = separator.exec(buffer);
    while (match) {
      const rawEvent = buffer.slice(0, match.index);
      buffer = buffer.slice(match.index + match[0].length);
      if (handleEvent(rawEvent)) {
        opts.onDone?.();
        return;
      }
      match = separator.exec(buffer);
    }
  }
  // Flush the decoder and drain any final, unterminated event.
  buffer += decoder.decode();
  if (buffer.trim()) {
    handleEvent(buffer);
  }
  opts.onDone?.();
}

View File

@@ -16,6 +16,18 @@ export const MODULES_API_URL =
export const FILE_SERVER = export const FILE_SERVER =
import.meta.env.VITE_FILE_SERVER || 'https://your-file-server.com'; import.meta.env.VITE_FILE_SERVER || 'https://your-file-server.com';
// OpenAI-compatible gateway (Ollama/Open-WebUI/LiteLLM etc.)
export const AI_API_URL =
import.meta.env.VITE_AI_API_URL ||
// Prefer same-origin reverse proxy during local development to avoid CORS.
(import.meta.env.DEV ? '/ai-proxy' : 'https://ai-api.websoft.top/api/v1');
// Ollama native API endpoint (usually http://host:11434).
// Note: browsers cannot call http from an https site (mixed-content); prefer same-origin proxy.
export const OLLAMA_API_URL =
import.meta.env.VITE_OLLAMA_API_URL ||
(import.meta.env.DEV ? '/ollama-proxy' : 'http://47.119.165.234:11434');
/** /**
* 以下配置一般不需要修改 * 以下配置一般不需要修改
*/ */

View File

@@ -60,6 +60,12 @@ export const routes = [
component: () => import('@/views/led/index.vue'), component: () => import('@/views/led/index.vue'),
meta: { title: '医生出诊信息表' } meta: { title: '医生出诊信息表' }
}, },
// AI 测试页面(无需菜单即可访问,登录后直接打开 /ai-test
{
path: '/ai-test',
component: () => import('@/views/ai/index.vue'),
meta: { title: 'AI 测试' }
},
// { // {
// path: '/forget', // path: '/forget',
// component: () => import('@/views/passport/forget/index.vue'), // component: () => import('@/views/passport/forget/index.vue'),

358
src/views/ai/index.vue Normal file
View File

@@ -0,0 +1,358 @@
<script setup lang="ts">
// AI test page (/ai-test): a minimal chat playground that talks to either
// an OpenAI-compatible gateway or the native Ollama API, with model
// listing, streaming or one-shot requests, temperature control and an
// abortable in-flight request.
import { computed, onBeforeUnmount, ref, watch } from 'vue';
import { message } from 'ant-design-vue';
import {
  chatCompletions,
  chatCompletionsStream,
  listModels,
  type OpenAIChatMessage,
  type OpenAIModel
} from '@/api/ai/openai';
import { AI_API_URL, OLLAMA_API_URL } from '@/config/setting';
import {
  listOllamaModels,
  ollamaChat,
  ollamaChatStream,
  type OllamaChatMessage
} from '@/api/ai/ollama';

// Messages are kept in the OpenAI shape and mapped to Ollama on demand.
type Msg = OpenAIChatMessage;
type Provider = 'openai' | 'ollama';

const provider = ref<Provider>('ollama');
// Default to Ollama native API when provider is ollama.
const baseURL = ref(
  provider.value === 'openai' ? AI_API_URL : OLLAMA_API_URL
);
// Optional gateway key (OpenAI provider only; not recommended in browser).
const apiKey = ref<string>('');

const modelLoading = ref(false);
// Model list, normalized to the OpenAI shape for both providers.
const models = ref<OpenAIModel[]>([]);
const modelId = ref<string>('');

const systemPrompt = ref<string>('你是一个有帮助的助手。');
const userPrompt = ref<string>('你好,介绍一下你能做什么。');
const temperature = ref<number>(0.7);
// Whether to request streaming (token-by-token) output.
const stream = ref<boolean>(true);

const sending = ref(false);
// Accumulated assistant reply for the current/last request.
const assistantText = ref<string>('');
const errorText = ref<string>('');
// Conversation history sent along with each new user message.
const history = ref<Msg[]>([]);
// Only referenced by the commented-out "history" panel in the template.
const historyJson = computed(() => JSON.stringify(history.value, null, 2));

const canSend = computed(() => {
  return !!modelId.value && !!userPrompt.value.trim() && !sending.value;
});

// AbortController of the in-flight request, if any.
const abortController = ref<AbortController | null>(null);

// Abort the current request (if any) and reset the sending state.
const stop = () => {
  abortController.value?.abort();
  abortController.value = null;
  sending.value = false;
};

// Load the provider's model list; auto-selects the first model when none
// is chosen. Falls back to the default base URL if the field is blank.
const loadModels = async () => {
  modelLoading.value = true;
  errorText.value = '';
  try {
    if (provider.value === 'openai') {
      if (!baseURL.value.trim()) {
        baseURL.value = AI_API_URL;
      }
      const res = await listModels({
        baseURL: baseURL.value.trim() || AI_API_URL,
        apiKey: apiKey.value.trim() || undefined
      });
      models.value = res.data ?? [];
      if (!modelId.value && models.value.length) {
        modelId.value = models.value[0].id;
      }
    } else {
      if (!baseURL.value.trim()) {
        baseURL.value = OLLAMA_API_URL;
      }
      const res = await listOllamaModels({
        baseURL: baseURL.value.trim() || OLLAMA_API_URL
      });
      // Map Ollama tags into the OpenAI model shape used by the select.
      models.value = (res.models ?? []).map((m) => ({
        id: m.name,
        name: m.name,
        object: 'model'
      }));
      if (!modelId.value && models.value.length) {
        modelId.value = models.value[0].id;
      }
    }
  } catch (e: any) {
    errorText.value = e?.message ?? String(e);
    if (
      provider.value === 'openai' &&
      String(errorText.value).includes('401')
    ) {
      message.error(
        '未认证(401):请填写 API Key或在本地用 AI_API_KEY 通过 /ai-proxy 注入'
      );
    } else {
      message.error('加载模型列表失败');
    }
  } finally {
    modelLoading.value = false;
  }
};

// Reset the conversation and output panes.
const clearChat = () => {
  history.value = [];
  assistantText.value = '';
  errorText.value = '';
};

// Send the current prompt: builds [system?, ...history, user], calls the
// selected provider (streaming or not), and on success appends the
// assistant reply to the history and clears the input.
const send = async () => {
  if (!canSend.value) return;
  sending.value = true;
  assistantText.value = '';
  errorText.value = '';
  const system: Msg = { role: 'system', content: systemPrompt.value.trim() };
  const user: Msg = { role: 'user', content: userPrompt.value.trim() };
  const messages: Msg[] = [
    ...(system.content ? [system] : []),
    ...history.value,
    user
  ];
  const controller = new AbortController();
  abortController.value = controller;
  try {
    if (provider.value === 'openai') {
      if (stream.value) {
        await chatCompletionsStream(
          {
            model: modelId.value,
            messages,
            temperature: temperature.value
          },
          {
            baseURL: baseURL.value.trim() || AI_API_URL,
            apiKey: apiKey.value.trim() || undefined,
            signal: controller.signal,
            onDelta: (t) => {
              assistantText.value += t;
            }
          }
        );
      } else {
        const res = await chatCompletions(
          {
            model: modelId.value,
            messages,
            temperature: temperature.value
          },
          {
            baseURL: baseURL.value.trim() || AI_API_URL,
            apiKey: apiKey.value.trim() || undefined,
            signal: controller.signal
          }
        );
        assistantText.value = res.choices?.[0]?.message?.content ?? '';
      }
    } else {
      // Convert to the Ollama message shape (role names are compatible).
      const ollamaMessages: OllamaChatMessage[] = messages.map((m) => ({
        role: m.role as any,
        content: m.content
      }));
      if (stream.value) {
        await ollamaChatStream(
          {
            model: modelId.value,
            messages: ollamaMessages,
            options: { temperature: temperature.value }
          },
          {
            baseURL: baseURL.value.trim() || OLLAMA_API_URL,
            signal: controller.signal,
            onDelta: (t) => {
              assistantText.value += t;
            }
          }
        );
      } else {
        const res = await ollamaChat(
          {
            model: modelId.value,
            messages: ollamaMessages,
            options: { temperature: temperature.value }
          },
          {
            baseURL: baseURL.value.trim() || OLLAMA_API_URL,
            signal: controller.signal
          }
        );
        assistantText.value = res.message?.content ?? '';
      }
    }
    history.value = [
      ...messages,
      { role: 'assistant', content: assistantText.value }
    ];
    userPrompt.value = '';
  } catch (e: any) {
    // Abort is expected when clicking "Stop".
    if (e?.name !== 'AbortError') {
      errorText.value = e?.message ?? String(e);
      message.error('请求失败(可能是 CORS 或鉴权问题)');
    }
  } finally {
    sending.value = false;
    abortController.value = null;
  }
};

// Options for the model select, preferring display name over id.
const modelOptions = computed(() => {
  return models.value.map((m) => ({
    label: m.name ?? m.id,
    value: m.id
  }));
});

// Load once for convenience; if the gateway blocks CORS you can still paste output from curl.
loadModels();

// Switching provider resets per-provider state and reloads the models.
watch(
  () => provider.value,
  (p) => {
    stop();
    clearChat();
    models.value = [];
    modelId.value = '';
    errorText.value = '';
    baseURL.value = p === 'openai' ? AI_API_URL : OLLAMA_API_URL;
    loadModels();
  }
);

// Abort any in-flight request when leaving the page.
onBeforeUnmount(() => {
  stop();
});
</script>
<template>
  <div class="ele-body ele-body-card">
    <a-card :bordered="false" :body-style="{ padding: '16px' }">
      <a-space direction="vertical" style="width: 100%" :size="12">
        <a-alert
          type="info"
          show-icon
          message="AI 助手"
          description="支持Qwen3.5、DeepSeek、Gemini3等主流的开源大模型免费使用"
        />
        <a-row :gutter="12">
          <!-- <a-col :xs="24" :md="6">-->
          <!-- <a-select-->
          <!-- v-model:value="provider"-->
          <!-- :options="[-->
          <!-- { label: 'OpenAI兼容(/v1)', value: 'openai' },-->
          <!-- { label: 'Ollama原生(/api)', value: 'ollama' }-->
          <!-- ]"-->
          <!-- style="width: 100%"-->
          <!-- />-->
          <!-- </a-col>-->
          <!-- <a-col :xs="24" :md="12">-->
          <!-- <a-input-->
          <!-- v-model:value="baseURL"-->
          <!-- addon-before="BaseURL"-->
          <!-- placeholder="https://ai-api.websoft.top/api/v1"-->
          <!-- />-->
          <!-- </a-col>-->
          <!-- API key input is only relevant for the OpenAI-compatible gateway -->
          <a-col :xs="24" :md="12" v-if="provider === 'openai'">
            <a-input-password
              v-model:value="apiKey"
              addon-before="API Key"
              placeholder="可选(不建议在前端保存)"
            />
          </a-col>
        </a-row>
        <a-row :gutter="12">
          <a-col :xs="24" :md="12">
            <a-select
              v-model:value="modelId"
              :options="modelOptions"
              :loading="modelLoading"
              placeholder="选择模型"
              style="width: 100%"
            />
          </a-col>
          <a-col :xs="24" :md="12">
            <a-space>
              <a-button :loading="modelLoading" @click="loadModels"
                >刷新模型</a-button
              >
              <span>流式</span>
              <a-switch v-model:checked="stream" />
              <span>温度</span>
              <a-input-number
                v-model:value="temperature"
                :min="0"
                :max="2"
                :step="0.1"
              />
            </a-space>
          </a-col>
        </a-row>
        <!-- <a-textarea-->
        <!-- v-model:value="systemPrompt"-->
        <!-- :auto-size="{ minRows: 2, maxRows: 6 }"-->
        <!-- placeholder="System Prompt可选"-->
        <!-- />-->
        <!-- Enter (without modifiers) submits; Shift+Enter inserts a newline -->
        <a-textarea
          v-model:value="userPrompt"
          :auto-size="{ minRows: 3, maxRows: 10 }"
          placeholder="输入要问的问题..."
          @pressEnter.exact.prevent="send"
        />
        <a-space>
          <a-button
            type="primary"
            :disabled="!canSend"
            :loading="sending"
            @click="send"
            >发送</a-button
          >
          <a-button v-if="sending" danger @click="stop">停止</a-button>
          <a-button @click="clearChat">清空</a-button>
        </a-space>
        <a-alert v-if="errorText" type="error" show-icon :message="errorText" />
        <a-divider>输出</a-divider>
        <a-card size="small" :bordered="true">
          <pre class="output">{{ assistantText }}</pre>
        </a-card>
        <!-- <a-divider>历史最后一次请求</a-divider>-->
        <!-- <a-card size="small" :bordered="true">-->
        <!-- <pre class="output">{{ historyJson }}</pre>-->
        <!-- </a-card>-->
      </a-space>
    </a-card>
  </div>
</template>
<style scoped lang="less">
/* Preserve model whitespace/newlines but wrap long tokens. */
.output {
  margin: 0;
  white-space: pre-wrap;
  word-break: break-word;
}
</style>

View File

@@ -1,4 +1,4 @@
import { defineConfig } from 'vite'; import { defineConfig, loadEnv } from 'vite';
import vue from '@vitejs/plugin-vue'; import vue from '@vitejs/plugin-vue';
import ViteCompression from 'vite-plugin-compression'; import ViteCompression from 'vite-plugin-compression';
import ViteComponents from 'unplugin-vue-components/vite'; import ViteComponents from 'unplugin-vue-components/vite';
@@ -44,7 +44,9 @@ function getSmartPort() {
} }
} }
export default defineConfig(({ command }) => { export default defineConfig(({ command, mode }) => {
// Load env for Vite config usage (including non-VITE_ keys like AI_API_KEY).
const env = loadEnv(mode, process.cwd(), '');
const isBuild = command === 'build'; const isBuild = command === 'build';
// 智能端口配置(仅在开发模式下) // 智能端口配置(仅在开发模式下)
@@ -68,7 +70,7 @@ export default defineConfig(({ command }) => {
// 代理配置 // 代理配置
proxy: { proxy: {
'/api': { '/api': {
target: process.env.VITE_API_URL || 'https://server.websoft.top', target: env.VITE_API_URL || process.env.VITE_API_URL || 'https://server.websoft.top',
changeOrigin: true, changeOrigin: true,
secure: false, secure: false,
configure: (proxy, _options) => { configure: (proxy, _options) => {
@@ -82,6 +84,39 @@ export default defineConfig(({ command }) => {
console.log('Received Response from the Target:', proxyRes.statusCode, req.url); console.log('Received Response from the Target:', proxyRes.statusCode, req.url);
}); });
}, },
},
// OpenAI-compatible gateway reverse proxy (dev only).
// Example:
// GET /ai-proxy/models -> https://ai-api.websoft.top/api/v1/models
// POST /ai-proxy/chat/completions -> https://ai-api.websoft.top/api/v1/chat/completions
'/ai-proxy': {
target: env.AI_PROXY_TARGET || 'https://ai-api.websoft.top',
changeOrigin: true,
secure: false,
rewrite: (path) =>
path.replace(/^\/ai-proxy/, env.AI_PROXY_REWRITE_PREFIX || '/api/v1'),
configure: (proxy) => {
proxy.on('proxyReq', (proxyReq) => {
// Inject auth for local dev to avoid putting API keys in the browser.
const key = env.AI_API_KEY || process.env.AI_API_KEY;
if (key && !proxyReq.getHeader('Authorization')) {
const trimmed = String(key).trim();
const value = /^bearer\s+/i.test(trimmed)
? trimmed
: `Bearer ${trimmed}`;
proxyReq.setHeader('Authorization', value);
}
});
}
},
// Ollama native API reverse proxy (dev only).
// GET /ollama-proxy/api/tags -> http://47.119.165.234:11434/api/tags
// POST /ollama-proxy/api/chat -> http://47.119.165.234:11434/api/chat
'/ollama-proxy': {
target: 'http://47.119.165.234:11434',
changeOrigin: true,
secure: false,
rewrite: (path) => path.replace(/^\/ollama-proxy/, '')
} }
}, },
// 端口冲突时的处理 // 端口冲突时的处理