feat(ai): add AI assistant with multi-model chat

- Integrate an OpenAI-compatible gateway and the native Ollama API
- Add an AI test page with streaming chat and model switching
- Configure a same-origin reverse proxy for the dev server to sidestep browser CORS (see the Vite sketch after this list)
- Add environment variables for the AI API and Ollama endpoints (a hypothetical setting.ts excerpt follows the commit metadata)
- Implement chat history, request abort, and parameter tuning
- Document an Nginx reverse-proxy configuration for production deployment (a sketch closes this page)
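
The same-origin dev proxy mentioned above is what lets the browser reach the AI gateway without tripping CORS. The dev-server config is not part of this diff; below is a minimal sketch, assuming the project is built on Vite. The /ai-proxy prefix matches the 401 hint inside the page, while the env variable names and fallback targets are assumptions.

// vite.config.ts (hypothetical sketch of the same-origin dev proxy)
import { defineConfig, loadEnv } from 'vite';

export default defineConfig(({ mode }) => {
  // Third argument '' also loads un-prefixed variables such as AI_API_KEY.
  const env = loadEnv(mode, process.cwd(), '');
  return {
    server: {
      proxy: {
        // The browser calls /ai-proxy/...; Vite forwards it to the gateway,
        // so every request stays same-origin and CORS never triggers.
        '/ai-proxy': {
          target: env.AI_API_URL || 'https://ai-api.websoft.top/api',
          changeOrigin: true,
          rewrite: (path) => path.replace(/^\/ai-proxy/, ''),
          // Inject the key on the server side so it never reaches the browser.
          headers: env.AI_API_KEY
            ? { Authorization: `Bearer ${env.AI_API_KEY}` }
            : undefined
        },
        // Ollama listens on 11434 by default and needs no auth.
        '/ollama': {
          target: 'http://127.0.0.1:11434',
          changeOrigin: true,
          rewrite: (path) => path.replace(/^\/ollama/, '')
        }
      }
    }
  };
});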
commit b40326c3a9 (parent acec6570e1), 2026-02-27 22:15:41 +08:00
9 changed files with 832 additions and 3 deletions
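
The new page imports AI_API_URL and OLLAMA_API_URL from '@/config/setting' (presumably src/config/setting.ts), which this diff does not include. A hypothetical excerpt, assuming the usual Vite import.meta.env wiring; only the two exported names are confirmed, the VITE_* variable names and fallbacks are guesses.

// src/config/setting.ts (hypothetical excerpt)
export const AI_API_URL: string =
  import.meta.env.VITE_AI_API_URL || '/ai-proxy/v1';

export const OLLAMA_API_URL: string =
  import.meta.env.VITE_OLLAMA_API_URL || '/ollama/api';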

src/views/ai/index.vue (new file, 358 lines)

@@ -0,0 +1,358 @@
<script setup lang="ts">
import { computed, onBeforeUnmount, ref, watch } from 'vue';
import { message } from 'ant-design-vue';
import {
  chatCompletions,
  chatCompletionsStream,
  listModels,
  type OpenAIChatMessage,
  type OpenAIModel
} from '@/api/ai/openai';
import { AI_API_URL, OLLAMA_API_URL } from '@/config/setting';
import {
  listOllamaModels,
  ollamaChat,
  ollamaChatStream,
  type OllamaChatMessage
} from '@/api/ai/ollama';

type Msg = OpenAIChatMessage;
type Provider = 'openai' | 'ollama';

const provider = ref<Provider>('ollama');
// Default to Ollama native API when provider is ollama.
const baseURL = ref(
  provider.value === 'openai' ? AI_API_URL : OLLAMA_API_URL
);
const apiKey = ref<string>('');
const modelLoading = ref(false);
const models = ref<OpenAIModel[]>([]);
const modelId = ref<string>('');
const systemPrompt = ref<string>('你是一个有帮助的助手。');
const userPrompt = ref<string>('你好,介绍一下你能做什么。');
const temperature = ref<number>(0.7);
const stream = ref<boolean>(true);
const sending = ref(false);
const assistantText = ref<string>('');
const errorText = ref<string>('');
const history = ref<Msg[]>([]);
const historyJson = computed(() => JSON.stringify(history.value, null, 2));
const canSend = computed(() => {
  return !!modelId.value && !!userPrompt.value.trim() && !sending.value;
});

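// Holds the in-flight request's controller so the Stop button and the
// unmount hook can cancel a pending (possibly streaming) request.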
const abortController = ref<AbortController | null>(null);
const stop = () => {
  abortController.value?.abort();
  abortController.value = null;
  sending.value = false;
};

const loadModels = async () => {
  modelLoading.value = true;
  errorText.value = '';
  try {
    if (provider.value === 'openai') {
      if (!baseURL.value.trim()) {
        baseURL.value = AI_API_URL;
      }
      const res = await listModels({
        baseURL: baseURL.value.trim() || AI_API_URL,
        apiKey: apiKey.value.trim() || undefined
      });
      models.value = res.data ?? [];
      if (!modelId.value && models.value.length) {
        modelId.value = models.value[0].id;
      }
    } else {
      if (!baseURL.value.trim()) {
        baseURL.value = OLLAMA_API_URL;
      }
      const res = await listOllamaModels({
        baseURL: baseURL.value.trim() || OLLAMA_API_URL
      });
      models.value = (res.models ?? []).map((m) => ({
        id: m.name,
        name: m.name,
        object: 'model'
      }));
      if (!modelId.value && models.value.length) {
        modelId.value = models.value[0].id;
      }
    }
  } catch (e: any) {
    errorText.value = e?.message ?? String(e);
    if (provider.value === 'openai' && errorText.value.includes('401')) {
      message.error(
        '未认证(401):请填写 API Key,或在本地用 AI_API_KEY 通过 /ai-proxy 注入'
      );
    } else {
      message.error('加载模型列表失败');
    }
  } finally {
    modelLoading.value = false;
  }
};

const clearChat = () => {
  history.value = [];
  assistantText.value = '';
  errorText.value = '';
};

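// Assemble system prompt + prior history + the new user turn, then dispatch
// to the selected provider, streaming deltas into assistantText when enabled.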
const send = async () => {
  if (!canSend.value) return;
  sending.value = true;
  assistantText.value = '';
  errorText.value = '';
  const system: Msg = { role: 'system', content: systemPrompt.value.trim() };
  const user: Msg = { role: 'user', content: userPrompt.value.trim() };
  const messages: Msg[] = [
    ...(system.content ? [system] : []),
    ...history.value,
    user
  ];
  const controller = new AbortController();
  abortController.value = controller;
  try {
    if (provider.value === 'openai') {
      if (stream.value) {
        await chatCompletionsStream(
          {
            model: modelId.value,
            messages,
            temperature: temperature.value
          },
          {
            baseURL: baseURL.value.trim() || AI_API_URL,
            apiKey: apiKey.value.trim() || undefined,
            signal: controller.signal,
            onDelta: (t) => {
              assistantText.value += t;
            }
          }
        );
      } else {
        const res = await chatCompletions(
          {
            model: modelId.value,
            messages,
            temperature: temperature.value
          },
          {
            baseURL: baseURL.value.trim() || AI_API_URL,
            apiKey: apiKey.value.trim() || undefined,
            signal: controller.signal
          }
        );
        assistantText.value = res.choices?.[0]?.message?.content ?? '';
      }
    } else {
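      // Ollama's native /api/chat takes the same role/content message shape,
      // but no Authorization header is sent.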
      const ollamaMessages: OllamaChatMessage[] = messages.map((m) => ({
        role: m.role as OllamaChatMessage['role'],
        content: m.content
      }));
      if (stream.value) {
        await ollamaChatStream(
          {
            model: modelId.value,
            messages: ollamaMessages,
            options: { temperature: temperature.value }
          },
          {
            baseURL: baseURL.value.trim() || OLLAMA_API_URL,
            signal: controller.signal,
            onDelta: (t) => {
              assistantText.value += t;
            }
          }
        );
      } else {
        const res = await ollamaChat(
          {
            model: modelId.value,
            messages: ollamaMessages,
            options: { temperature: temperature.value }
          },
          {
            baseURL: baseURL.value.trim() || OLLAMA_API_URL,
            signal: controller.signal
          }
        );
        assistantText.value = res.message?.content ?? '';
      }
    }
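    // Persist the completed exchange so follow-up questions keep context.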
    history.value = [
      ...messages,
      { role: 'assistant', content: assistantText.value }
    ];
    userPrompt.value = '';
  } catch (e: any) {
    // Abort is expected when clicking "Stop".
    if (e?.name !== 'AbortError') {
      errorText.value = e?.message ?? String(e);
      message.error('请求失败(可能是 CORS 或鉴权问题)');
    }
  } finally {
    sending.value = false;
    abortController.value = null;
  }
};

const modelOptions = computed(() => {
  return models.value.map((m) => ({
    label: m.name ?? m.id,
    value: m.id
  }));
});

// Load once for convenience; if the gateway blocks CORS you can still paste
// output from curl.
loadModels();

watch(
  () => provider.value,
  (p) => {
    stop();
    clearChat();
    models.value = [];
    modelId.value = '';
    errorText.value = '';
    baseURL.value = p === 'openai' ? AI_API_URL : OLLAMA_API_URL;
    loadModels();
  }
);

onBeforeUnmount(() => {
  stop();
});
</script>
<template>
  <div class="ele-body ele-body-card">
    <a-card :bordered="false" :body-style="{ padding: '16px' }">
      <a-space direction="vertical" style="width: 100%" :size="12">
        <a-alert
          type="info"
          show-icon
          message="AI 助手"
          description="支持 Qwen3.5、DeepSeek、Gemini3 等主流开源大模型,免费使用"
        />
        <a-row :gutter="12">
          <!-- <a-col :xs="24" :md="6">-->
          <!--   <a-select-->
          <!--     v-model:value="provider"-->
          <!--     :options="[-->
          <!--       { label: 'OpenAI兼容(/v1)', value: 'openai' },-->
          <!--       { label: 'Ollama原生(/api)', value: 'ollama' }-->
          <!--     ]"-->
          <!--     style="width: 100%"-->
          <!--   />-->
          <!-- </a-col>-->
          <!-- <a-col :xs="24" :md="12">-->
          <!--   <a-input-->
          <!--     v-model:value="baseURL"-->
          <!--     addon-before="BaseURL"-->
          <!--     placeholder="https://ai-api.websoft.top/api/v1"-->
          <!--   />-->
          <!-- </a-col>-->
          <a-col v-if="provider === 'openai'" :xs="24" :md="12">
            <a-input-password
              v-model:value="apiKey"
              addon-before="API Key"
              placeholder="可选(不建议在前端保存)"
            />
          </a-col>
        </a-row>
        <a-row :gutter="12">
          <a-col :xs="24" :md="12">
            <a-select
              v-model:value="modelId"
              :options="modelOptions"
              :loading="modelLoading"
              placeholder="选择模型"
              style="width: 100%"
            />
          </a-col>
          <a-col :xs="24" :md="12">
            <a-space>
              <a-button :loading="modelLoading" @click="loadModels"
                >刷新模型</a-button
              >
              <span>流式</span>
              <a-switch v-model:checked="stream" />
              <span>温度</span>
              <a-input-number
                v-model:value="temperature"
                :min="0"
                :max="2"
                :step="0.1"
              />
            </a-space>
          </a-col>
        </a-row>
        <!-- <a-textarea-->
        <!--   v-model:value="systemPrompt"-->
        <!--   :auto-size="{ minRows: 2, maxRows: 6 }"-->
        <!--   placeholder="System Prompt(可选)"-->
        <!-- />-->
        <a-textarea
          v-model:value="userPrompt"
          :auto-size="{ minRows: 3, maxRows: 10 }"
          placeholder="输入要问的问题..."
          @pressEnter.exact.prevent="send"
        />
        <a-space>
          <a-button
            type="primary"
            :disabled="!canSend"
            :loading="sending"
            @click="send"
            >发送</a-button
          >
          <a-button v-if="sending" danger @click="stop">停止</a-button>
          <a-button @click="clearChat">清空</a-button>
        </a-space>
        <a-alert v-if="errorText" type="error" show-icon :message="errorText" />
        <a-divider>输出</a-divider>
        <a-card size="small" :bordered="true">
          <pre class="output">{{ assistantText }}</pre>
        </a-card>
        <!-- <a-divider>历史(最后一次请求)</a-divider>-->
        <!-- <a-card size="small" :bordered="true">-->
        <!--   <pre class="output">{{ historyJson }}</pre>-->
        <!-- </a-card>-->
      </a-space>
    </a-card>
  </div>
</template>
<style scoped lang="less">
.output {
  margin: 0;
  white-space: pre-wrap;
  word-break: break-word;
}
</style>
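
The page's streaming path relies on chatCompletionsStream and ollamaChatStream from src/api/ai/*, which this commit excerpt does not show. For orientation only, here is a minimal sketch of the OpenAI-compatible variant. Only the (body, { baseURL, apiKey, signal, onDelta }) call shape is confirmed by the page; the endpoint path and SSE framing are assumed to follow the usual OpenAI convention (data: {...} lines terminated by data: [DONE]).

// Hypothetical sketch of chatCompletionsStream (src/api/ai/openai.ts).
export interface ChatStreamOptions {
  baseURL: string;
  apiKey?: string;
  signal?: AbortSignal;
  onDelta: (text: string) => void;
}

export async function chatCompletionsStream(
  body: Record<string, unknown>,
  opts: ChatStreamOptions
): Promise<void> {
  const res = await fetch(`${opts.baseURL}/chat/completions`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      ...(opts.apiKey ? { Authorization: `Bearer ${opts.apiKey}` } : {})
    },
    body: JSON.stringify({ ...body, stream: true }),
    signal: opts.signal
  });
  if (!res.ok || !res.body) {
    throw new Error(`HTTP ${res.status}`);
  }
  const reader = res.body.getReader();
  const decoder = new TextDecoder();
  let buffer = '';
  for (;;) {
    const { done, value } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    // Keep the trailing partial line in the buffer until it completes.
    const lines = buffer.split('\n');
    buffer = lines.pop() ?? '';
    for (const line of lines) {
      const data = line.trim();
      if (!data.startsWith('data:')) continue;
      const payload = data.slice('data:'.length).trim();
      if (payload === '[DONE]') return;
      const delta = JSON.parse(payload)?.choices?.[0]?.delta?.content;
      if (delta) opts.onDelta(delta);
    }
  }
}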
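
For production, the commit ships Nginx reverse-proxy documentation rather than the Vite proxy. A hypothetical location block in that spirit follows; the paths, upstream hosts, and key placeholder are assumptions. Note that proxy_buffering off matters here: buffered responses would hold back streaming deltas until the request completes.

# Hypothetical production reverse proxy; replace hosts, paths, and the key.
location /ai-proxy/ {
    proxy_pass https://ai-api.websoft.top/api/;
    proxy_set_header Host ai-api.websoft.top;
    # Inject the key server-side so it never ships to the browser.
    proxy_set_header Authorization "Bearer YOUR_AI_API_KEY";
    # Streaming (SSE/NDJSON) needs unbuffered proxying so deltas flush at once.
    proxy_buffering off;
    proxy_http_version 1.1;
    proxy_read_timeout 300s;
}

location /ollama/ {
    proxy_pass http://127.0.0.1:11434/;
    proxy_buffering off;
    proxy_http_version 1.1;
    proxy_read_timeout 300s;
}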