Compare commits

...

2 Commits

Author SHA1 Message Date
d466c9e9a8 fix(shop): 修复店铺二维码接口地址错误
- 将二维码接口地址从 mp-api.websoft.top 修改为 cms-api.websoft.top
- 修正扫码登录获取订单二维码的接口路径
- 确保用户ID参数正确拼接到请求URL中
2026-04-14 01:48:53 +08:00
d079a28ffc feat(ai): 添加AI模块文档和重构前端AI组件
- 新增 docs/ai/README.md 包含完整的AI模块配置、建表、API文档
- 重构 src/views/ai/index.vue 组件,移除硬编码BASE_URL和多余参数
- 添加 src/api/ai/backend.ts 统一的AI后端API接口实现
- 集成模型列表、流式对话、非流式对话等功能
- 实现SSE流式响应处理和鉴权头自动携带
- 移除历史消息存储和温度参数等冗余功能
2026-02-28 11:02:40 +08:00
4 changed files with 282 additions and 69 deletions

61
docs/ai/README.md Normal file
View File

@@ -0,0 +1,61 @@
# AI 模块(Ollama + RAG + 订单分析)
## 1. 配置
`src/main/resources/application.yml`
- `ai.ollama.base-url`:主地址(例如 `https://ai-api.websoft.top`)
- `ai.ollama.fallback-url`:备用地址(例如 `http://47.119.165.234:11434`)
- `ai.ollama.chat-model`:对话模型(`qwen3.5:cloud`)
- `ai.ollama.embed-model`:向量模型(`qwen3-embedding:4b`)
## 2. 建表(知识库)
执行:`docs/ai/ai_kb_tables.sql`
## 3. API
说明:所有接口默认需要登录(`@PreAuthorize("isAuthenticated()")`),并且要求能够拿到 `tenantId`(header 或登录用户)。
### 3.1 对话
- `GET /api/ai/models`:获取 Ollama 模型列表
- `POST /api/ai/chat`:非流式对话
- `POST /api/ai/chat/stream`:流式对话(SSE)
- `GET /api/ai/chat/stream?prompt=...`:流式对话(SSE,适配 EventSource)
请求示例(非流式):
```json
{
"prompt": "帮我写一个退款流程说明"
}
```
### 3.2 知识库(RAG)
- `POST /api/ai/kb/upload`:上传文档入库(建议 txt/md/html)
- `POST /api/ai/kb/sync/cms`:同步 CMS 已发布文章到知识库(当前租户)
- `POST /api/ai/kb/query`:仅检索 topK
- `POST /api/ai/kb/ask`:检索 + 生成答案(答案要求引用 chunk_id)
请求示例(ask):
```json
{
"question": "怎么开具发票?",
"topK": 5
}
```
### 3.3 商城订单分析(按租户/按天)
- `POST /api/ai/analytics/query`:返回按天指标数据
- `POST /api/ai/analytics/ask`:基于指标数据生成分析结论
请求示例(ask):
```json
{
"question": "最近30天支付率有没有明显下滑请给出原因排查建议。",
"startDate": "2026-02-01",
"endDate": "2026-02-27"
}
```

200
src/api/ai/backend.ts Normal file
View File

@@ -0,0 +1,200 @@
import request from '@/utils/request';
import type { ApiResult } from '@/api';
import { API_BASE_URL, TOKEN_HEADER_NAME } from '@/config/setting';
import { getToken } from '@/utils/token-util';
import { getHostname, getTenantId } from '@/utils/domain';
import { getMerchantId } from '@/utils/merchant';
/**
 * One model entry as reported by the backend `GET /api/ai/models` endpoint.
 * Different servers identify models by `name` or by `id`; any extra fields
 * are preserved through the index signature.
 */
export interface AiModel {
  name?: string;
  id?: string;
  [k: string]: unknown;
}

/** Request body shared by `/ai/chat` and `/ai/chat/stream`. */
export interface AiChatRequest {
  prompt: string;
}
/** Strip every trailing slash so relative paths can be appended with one `/`. */
function normalizeBaseURL(baseURL: string) {
  let end = baseURL.length;
  while (end > 0 && baseURL.charAt(end - 1) === '/') {
    end -= 1;
  }
  return baseURL.slice(0, end);
}
/**
 * Resolve the API base URL at runtime. A user override persisted under the
 * `ApiUrl` localStorage key wins over the build-time `API_BASE_URL` constant.
 * Kept consistent with `src/utils/request.ts` so fetch-based SSE calls hit
 * the same origin as axios calls.
 */
function getRuntimeApiBaseURL(): string {
  let override: string | null = null;
  try {
    override = localStorage.getItem('ApiUrl');
  } catch {
    // localStorage can throw (privacy mode / sandboxed frames); fall through.
  }
  return normalizeBaseURL(override ? override : API_BASE_URL || '');
}
/**
 * Assemble the auth/tenant headers for plain `fetch` calls (SSE), mirroring
 * what the axios request interceptor attaches. localStorage fallbacks keep
 * localhost development usable when domain-based resolution is unavailable.
 */
function buildAuthHeaders(): Record<string, string> {
  const token = getToken();
  const tenantId =
    getTenantId() ?? (localStorage.getItem('TenantId') || undefined);
  const merchantId = getMerchantId?.();

  // Candidate header values; falsy entries are filtered out below.
  const candidates: Array<[string, string | null | undefined]> = [
    [TOKEN_HEADER_NAME, token],
    ['TenantId', tenantId ? String(tenantId) : undefined],
    ['CompanyId', localStorage.getItem('CompanyId')],
    ['MerchantId', merchantId ? String(merchantId) : undefined],
    ['Domain', getHostname?.()]
  ];

  const headers: Record<string, string> = {};
  for (const [key, value] of candidates) {
    if (value) {
      headers[key] = value;
    }
  }
  return headers;
}
/**
 * Best-effort extraction of displayable text from an arbitrary payload:
 * plain strings pass through, primitives are stringified, arrays are
 * concatenated, and for objects a fixed list of well-known locations
 * (Ollama / OpenAI response shapes) is probed in priority order.
 * Returns '' when nothing text-like is found.
 */
function extractText(payload: unknown): string {
  if (payload == null) return '';
  switch (typeof payload) {
    case 'string':
      return payload;
    case 'number':
    case 'boolean':
      return String(payload);
    default:
      break;
  }
  if (Array.isArray(payload)) {
    let joined = '';
    for (const item of payload) {
      joined += extractText(item);
    }
    return joined;
  }
  if (typeof payload === 'object') {
    const source: any = payload;
    // First non-empty result wins.
    const probes: unknown[] = [
      source.text,
      source.answer,
      source.content,
      source.data,
      source.message?.content,
      source.delta?.content,
      source.choices?.[0]?.delta?.content,
      source.choices?.[0]?.message?.content
    ];
    for (const probe of probes) {
      const found = extractText(probe);
      if (found) return found;
    }
  }
  return '';
}
/**
 * Normalize the various shapes the models endpoint may return — a bare
 * array, an array of name strings, or `{ models: [...] }` — into `AiModel[]`.
 * Unrecognized payloads yield an empty list.
 */
export function normalizeModels(payload: unknown): AiModel[] {
  const toModel = (entry: unknown): AiModel =>
    typeof entry === 'string' ? { name: entry } : (entry as AiModel);

  if (!payload) return [];
  if (Array.isArray(payload)) {
    return payload.map(toModel);
  }
  if (typeof payload === 'object') {
    const nested = (payload as any).models;
    if (Array.isArray(nested)) {
      return nested.map(toModel);
    }
  }
  return [];
}
/**
 * Backend AI module: `GET /api/ai/models` — list available Ollama models.
 * The frontend baseURL already ends with `/api`, hence the relative path.
 * Rejects with the server-provided message on a non-zero business code.
 */
export async function aiListModels(): Promise<unknown> {
  const { data } = await request.get<ApiResult<unknown>>('/ai/models');
  if (data.code !== 0) {
    throw new Error(data.message);
  }
  return data.data;
}
/**
 * Backend AI module: `POST /api/ai/chat` — non-streaming chat.
 * Resolves to the answer text extracted from the response payload;
 * rejects with the server-provided message on a non-zero business code.
 */
export async function aiChat(body: AiChatRequest): Promise<string> {
  const { data } = await request.post<ApiResult<unknown>>('/ai/chat', body);
  if (data.code !== 0) {
    throw new Error(data.message);
  }
  return extractText(data.data);
}
/**
 * Backend AI module: `POST /api/ai/chat/stream` (SSE).
 * Uses `fetch` (not axios) so the response `ReadableStream` can be consumed
 * incrementally, while still attaching the auth/tenant headers.
 *
 * Fixes over the previous version:
 * - a trailing SSE event not terminated by a blank line before the stream
 *   closed was silently dropped; the leftover buffer is now flushed;
 * - `TextDecoder` is now flushed (`decode()` without args) so a multi-byte
 *   UTF-8 character split across the final chunk is not lost;
 * - the reader is cancelled once the server signals `[DONE]`, releasing
 *   the connection.
 *
 * @param body request payload (`{ prompt }`)
 * @param opts.signal  abort signal for cancelling the request
 * @param opts.onDelta called with each decoded text fragment
 * @param opts.onDone  called once the stream completes
 * @throws Error when the HTTP response is not OK or has no body
 */
export async function aiChatStream(
  body: AiChatRequest,
  opts: {
    signal?: AbortSignal;
    onDelta: (text: string) => void;
    onDone?: () => void;
  }
): Promise<void> {
  const baseURL = getRuntimeApiBaseURL();
  const url = `${baseURL}/ai/chat/stream`;
  const res = await fetch(url, {
    method: 'POST',
    headers: {
      Accept: 'text/event-stream',
      'Content-Type': 'application/json',
      ...buildAuthHeaders()
    },
    body: JSON.stringify(body),
    signal: opts.signal
  });
  if (!res.ok || !res.body) {
    const text = await res.text().catch(() => '');
    throw new Error(
      `aiChatStream failed: ${res.status} ${res.statusText}${
        text ? ` - ${text}` : ''
      }`
    );
  }

  const reader = res.body.getReader();
  const decoder = new TextDecoder('utf-8');
  let buffer = '';

  // Parse one raw SSE event. Returns the decoded delta text ('' when the
  // event carries no data) or null when the server sent the `[DONE]` marker.
  const parseEvent = (rawEvent: string): string | null => {
    const dataParts: string[] = [];
    for (const line of rawEvent.split('\n')) {
      const trimmed = line.trimStart();
      if (!trimmed.startsWith('data:')) continue;
      dataParts.push(trimmed.slice(5).trimStart());
    }
    if (!dataParts.length) return '';
    const data = dataParts.join('\n').trim();
    if (!data) return '';
    if (data === '[DONE]') return null;
    try {
      return extractText(JSON.parse(data));
    } catch {
      // Some servers stream plain text without JSON.
      return data;
    }
  };

  // Drain every complete (blank-line terminated) event currently buffered,
  // emitting deltas as we go. Returns true when `[DONE]` was seen.
  const drainBuffer = (): boolean => {
    let idx = buffer.indexOf('\n\n');
    while (idx !== -1) {
      const rawEvent = buffer.slice(0, idx);
      buffer = buffer.slice(idx + 2);
      idx = buffer.indexOf('\n\n');
      const delta = parseEvent(rawEvent);
      if (delta === null) return true;
      if (delta) opts.onDelta(delta);
    }
    return false;
  };

  while (true) {
    const { value, done } = await reader.read();
    if (done) break;
    buffer += decoder.decode(value, { stream: true });
    // Some servers use CRLF; normalize to simplify parsing.
    buffer = buffer.replace(/\r\n/g, '\n').replace(/\r/g, '\n');
    if (drainBuffer()) {
      // Server signalled completion; stop reading and free the connection.
      await reader.cancel().catch(() => undefined);
      opts.onDone?.();
      return;
    }
  }

  // Flush the decoder and process any trailing event that was not followed
  // by a blank line before the stream closed.
  buffer += decoder.decode();
  buffer = buffer.replace(/\r\n/g, '\n').replace(/\r/g, '\n');
  if (buffer.trim()) {
    const delta = parseEvent(buffer);
    if (delta) opts.onDelta(delta);
  }
  opts.onDone?.();
}

View File

@@ -2,41 +2,26 @@
import { computed, onBeforeUnmount, ref } from 'vue';
import { message } from 'ant-design-vue';
import {
listOllamaModels,
ollamaChat,
ollamaChatStream,
type OllamaChatMessage
} from '@/api/ai/ollama';
type Msg = OllamaChatMessage;
// Hardcode endpoint to avoid going through mp.websoft.top `/proxy`.
// The API methods append `/api/*` paths.
//
// IMPORTANT: do not use `127.0.0.1` in browser production builds:
// it points to the visitor's machine, not your server.
// If you want to use server-local Ollama (`127.0.0.1:11434`), put it behind an HTTPS reverse proxy
// (e.g. `https://ai-api.websoft.top` or same-origin `/proxy`).
const BASE_URL = 'https://ai-api.websoft.top';
aiChat,
aiChatStream,
aiListModels,
normalizeModels
} from '@/api/ai/backend';
const modelLoading = ref(false);
const models = ref<Array<{ id: string; name?: string }>>([]);
const modelId = ref<string>('');
const systemPrompt = ref<string>('你是一个有帮助的助手。');
const userPrompt = ref<string>('你好,介绍一下你能做什么。');
const temperature = ref<number>(0.7);
const stream = ref<boolean>(true);
const sending = ref(false);
const assistantText = ref<string>('');
const errorText = ref<string>('');
const history = ref<Msg[]>([]);
const canSend = computed(() => {
return !!modelId.value && !!userPrompt.value.trim() && !sending.value;
return !!userPrompt.value.trim() && !sending.value;
});
const abortController = ref<AbortController | null>(null);
@@ -51,13 +36,14 @@
modelLoading.value = true;
errorText.value = '';
try {
const res = await listOllamaModels({
baseURL: BASE_URL
});
models.value = (res.models ?? []).map((m) => ({
id: m.name,
name: m.name
}));
const res = await aiListModels();
const ms = normalizeModels(res);
models.value = ms
.map((m) => ({
id: String(m.name ?? m.id ?? ''),
name: String(m.name ?? m.id ?? '')
}))
.filter((m) => !!m.id);
if (!modelId.value && models.value.length) {
modelId.value = models.value[0].id;
}
@@ -70,7 +56,6 @@
};
const clearChat = () => {
history.value = [];
assistantText.value = '';
errorText.value = '';
};
@@ -82,27 +67,16 @@
assistantText.value = '';
errorText.value = '';
const system: Msg = { role: 'system', content: systemPrompt.value.trim() };
const user: Msg = { role: 'user', content: userPrompt.value.trim() };
const messages: Msg[] = [
...(system.content ? [system] : []),
...history.value,
user
];
const prompt = userPrompt.value.trim();
const controller = new AbortController();
abortController.value = controller;
try {
if (stream.value) {
await ollamaChatStream(
await aiChatStream(
{ prompt },
{
model: modelId.value,
messages,
options: { temperature: temperature.value }
},
{
baseURL: BASE_URL,
signal: controller.signal,
onDelta: (t) => {
assistantText.value += t;
@@ -110,30 +84,15 @@
}
);
} else {
const res = await ollamaChat(
{
model: modelId.value,
messages,
options: { temperature: temperature.value }
},
{
baseURL: BASE_URL,
signal: controller.signal
}
);
assistantText.value = res.message?.content ?? '';
// axios 已在拦截器里自动带上 token/tenant
assistantText.value = await aiChat({ prompt });
}
history.value = [
...messages,
{ role: 'assistant', content: assistantText.value }
];
userPrompt.value = '';
} catch (e: any) {
// Abort is expected when clicking "Stop".
if (e?.name !== 'AbortError') {
errorText.value = e?.message ?? String(e);
message.error('请求失败(可能是 CORS 或鉴权问题');
message.error('请求失败(可能是后端未启动 / 鉴权 / 租户头缺失');
}
} finally {
sending.value = false;
@@ -184,13 +143,6 @@
>
<span>流式</span>
<a-switch v-model:checked="stream" />
<span>温度</span>
<a-input-number
v-model:value="temperature"
:min="0"
:max="2"
:step="0.1"
/>
</a-space>
</a-col>
</a-row>

View File

@@ -133,7 +133,7 @@
const loading = ref(true);
const getQrCodeUrl = (userId?: number) => {
return `https://mp-api.websoft.top/api/wx-login/getOrderQRCodeUnlimited/uid_${
return `https://cms-api.websoft.top/api/wx-login/getOrderQRCodeUnlimited/uid_${
userId ?? ''
}`;
};