feat: persist RAG Lab parameters and improve the LLM selector [AC-ASA-05, AC-ASA-14, AC-ASA-15]

- Add a ragLab store that persists RAG experiment parameters to localStorage
- Fix the LLM selector placeholder logic so the current configuration is displayed correctly
- Add a "selected" tag to distinguish the user's choice from the system default configuration
- Adjust the dropdown styling so tags are no longer clipped
MerCry 2026-02-25 14:45:17 +08:00
parent 4579159c0a
commit 08e84d194f
2 changed files with 80 additions and 36 deletions

src/stores/ragLab.ts (new file)

@@ -0,0 +1,41 @@
+import { defineStore } from 'pinia'
+import { ref, watch } from 'vue'
+
+export const useRagLabStore = defineStore('ragLab', () => {
+  const query = ref(localStorage.getItem('ragLab_query') || '')
+  const kbIds = ref<string[]>(JSON.parse(localStorage.getItem('ragLab_kbIds') || '[]'))
+  const llmProvider = ref(localStorage.getItem('ragLab_llmProvider') || '')
+  const topK = ref(parseInt(localStorage.getItem('ragLab_topK') || '3', 10))
+  const scoreThreshold = ref(parseFloat(localStorage.getItem('ragLab_scoreThreshold') || '0.5'))
+  const generateResponse = ref(localStorage.getItem('ragLab_generateResponse') !== 'false')
+  const streamOutput = ref(localStorage.getItem('ragLab_streamOutput') === 'true')
+
+  watch(query, (val) => localStorage.setItem('ragLab_query', val))
+  watch(kbIds, (val) => localStorage.setItem('ragLab_kbIds', JSON.stringify(val)), { deep: true })
+  watch(llmProvider, (val) => localStorage.setItem('ragLab_llmProvider', val))
+  watch(topK, (val) => localStorage.setItem('ragLab_topK', String(val)))
+  watch(scoreThreshold, (val) => localStorage.setItem('ragLab_scoreThreshold', String(val)))
+  watch(generateResponse, (val) => localStorage.setItem('ragLab_generateResponse', String(val)))
+  watch(streamOutput, (val) => localStorage.setItem('ragLab_streamOutput', String(val)))
+
+  const clearParams = () => {
+    query.value = ''
+    kbIds.value = []
+    llmProvider.value = ''
+    topK.value = 3
+    scoreThreshold.value = 0.5
+    generateResponse.value = true
+    streamOutput.value = false
+  }
+
+  return {
+    query,
+    kbIds,
+    llmProvider,
+    topK,
+    scoreThreshold,
+    generateResponse,
+    streamOutput,
+    clearParams
+  }
+})
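
Every parameter in the store follows the same two-step pattern: read from localStorage with a fallback default, then write back on every change. Note the asymmetric boolean parsing: !== 'false' makes generateResponse default to true when the key is absent, while === 'true' makes streamOutput default to false. The repetition could be factored into a small utility; a minimal sketch, using a hypothetical persistedRef helper that is not part of this commit:

import { ref, watch, type Ref } from 'vue'

// Hypothetical helper (not in this commit): a single localStorage-backed
// ref, replacing the per-parameter getItem/watch/setItem boilerplate.
function persistedRef<T>(key: string, defaultValue: T): Ref<T> {
  const raw = localStorage.getItem(key)
  // A JSON round-trip handles strings, numbers, booleans, and arrays alike.
  const state = ref(raw !== null ? (JSON.parse(raw) as T) : defaultValue) as Ref<T>
  // deep: true so in-place mutations of arrays (e.g. kbIds) are persisted too.
  watch(state, (val) => localStorage.setItem(key, JSON.stringify(val)), { deep: true })
  return state
}

// Usage mirroring the store above:
// const topK = persistedRef('ragLab_topK', 3)
// const kbIds = persistedRef<string[]>('ragLab_kbIds', [])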

RAG Lab view component (Vue SFC)

@@ -21,7 +21,7 @@
     <el-form label-position="top">
       <el-form-item label="查询 Query">
         <el-input
-          v-model="queryParams.query"
+          v-model="query"
           type="textarea"
           :rows="4"
           placeholder="输入测试问题..."
@ -29,7 +29,7 @@
</el-form-item> </el-form-item>
<el-form-item label="知识库范围"> <el-form-item label="知识库范围">
<el-select <el-select
v-model="queryParams.kbIds" v-model="kbIds"
multiple multiple
placeholder="请选择知识库" placeholder="请选择知识库"
style="width: 100%" style="width: 100%"
@@ -47,7 +47,7 @@
       </el-form-item>
       <el-form-item label="LLM 模型">
         <LLMSelector
-          v-model="queryParams.llmProvider"
+          v-model="llmProvider"
           :providers="llmProviders"
           :loading="llmLoading"
           :current-provider="currentLLMProvider"
@@ -59,12 +59,12 @@
       <el-form-item label="参数配置">
         <div class="param-item">
           <span class="label">Top-K</span>
-          <el-input-number v-model="queryParams.topK" :min="1" :max="10" />
+          <el-input-number v-model="topK" :min="1" :max="10" />
         </div>
         <div class="param-item">
           <span class="label">Score Threshold</span>
           <el-slider
-            v-model="queryParams.scoreThreshold"
+            v-model="scoreThreshold"
            :min="0"
            :max="1"
            :step="0.1"
@ -73,11 +73,11 @@
</div> </div>
<div class="param-item"> <div class="param-item">
<span class="label">生成 AI 回复</span> <span class="label">生成 AI 回复</span>
<el-switch v-model="queryParams.generateResponse" /> <el-switch v-model="generateResponse" />
</div> </div>
<div class="param-item" v-if="queryParams.generateResponse"> <div class="param-item" v-if="generateResponse">
<span class="label">流式输出</span> <span class="label">流式输出</span>
<el-switch v-model="queryParams.streamOutput" /> <el-switch v-model="streamOutput" />
</div> </div>
</el-form-item> </el-form-item>
<el-button <el-button
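
Because the destructured store refs sit at the top level of <script setup>, they auto-unwrap in the template: v-model="topK" reads and writes topK.value directly. Every form control above therefore writes straight into the ragLab store, and the store's watchers persist each edit to localStorage immediately, with no explicit save step.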
@ -130,9 +130,9 @@
<pre><code>{{ finalPrompt }}</code></pre> <pre><code>{{ finalPrompt }}</code></pre>
</div> </div>
</el-tab-pane> </el-tab-pane>
<el-tab-pane label="AI 回复" name="ai-response" v-if="queryParams.generateResponse"> <el-tab-pane label="AI 回复" name="ai-response" v-if="generateResponse">
<StreamOutput <StreamOutput
v-if="queryParams.streamOutput" v-if="streamOutput"
:content="streamContent" :content="streamContent"
:is-streaming="streaming" :is-streaming="streaming"
:error="streamError" :error="streamError"
@ -157,12 +157,14 @@
</template> </template>
<script setup lang="ts"> <script setup lang="ts">
import { ref, reactive, onMounted, computed } from 'vue' import { ref, onMounted } from 'vue'
import { ElMessage } from 'element-plus' import { ElMessage } from 'element-plus'
import { Edit } from '@element-plus/icons-vue' import { Edit } from '@element-plus/icons-vue'
import { runRagExperiment, createSSEConnection, type AIResponse, type RetrievalResult } from '@/api/rag' import { runRagExperiment, createSSEConnection, type AIResponse, type RetrievalResult } from '@/api/rag'
import { getLLMProviders, getLLMConfig, type LLMProviderInfo } from '@/api/llm' import { getLLMProviders, getLLMConfig, type LLMProviderInfo } from '@/api/llm'
import { listKnowledgeBases } from '@/api/kb' import { listKnowledgeBases } from '@/api/kb'
import { useRagLabStore } from '@/stores/ragLab'
import { storeToRefs } from 'pinia'
import AIResponseViewer from '@/components/rag/AIResponseViewer.vue' import AIResponseViewer from '@/components/rag/AIResponseViewer.vue'
import StreamOutput from '@/components/rag/StreamOutput.vue' import StreamOutput from '@/components/rag/StreamOutput.vue'
import LLMSelector from '@/components/rag/LLMSelector.vue' import LLMSelector from '@/components/rag/LLMSelector.vue'
@@ -173,6 +175,17 @@ interface KnowledgeBase {
   documentCount: number
 }

+const ragLabStore = useRagLabStore()
+const {
+  query,
+  kbIds,
+  llmProvider,
+  topK,
+  scoreThreshold,
+  generateResponse,
+  streamOutput
+} = storeToRefs(ragLabStore)
+
 const loading = ref(false)
 const kbLoading = ref(false)
 const llmLoading = ref(false)
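
storeToRefs is the important detail in this hunk: destructuring a Pinia store directly copies the current values and severs reactivity, while storeToRefs returns live refs into the store's state. A minimal illustration (not part of the diff):

import { storeToRefs } from 'pinia'
import { useRagLabStore } from '@/stores/ragLab'

const store = useRagLabStore()

const { topK: snapshot } = store       // plain number, frozen at read time
const { topK } = storeToRefs(store)    // Ref<number> that tracks the store

store.topK = 7
console.log(snapshot)    // still the old value
console.log(topK.value)  // 7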
@@ -182,16 +195,6 @@ const knowledgeBases = ref<KnowledgeBase[]>([])
 const llmProviders = ref<LLMProviderInfo[]>([])
 const currentLLMProvider = ref('')

-const queryParams = reactive({
-  query: '',
-  kbIds: [] as string[],
-  llmProvider: '',
-  topK: 3,
-  scoreThreshold: 0.5,
-  generateResponse: true,
-  streamOutput: false
-})
-
 const retrievalResults = ref<RetrievalResult[]>([])
 const finalPrompt = ref('')
 const aiResponse = ref<AIResponse | null>(null)
@ -232,18 +235,18 @@ const fetchLLMProviders = async () => {
} }
const handleLLMChange = (provider: LLMProviderInfo | undefined) => { const handleLLMChange = (provider: LLMProviderInfo | undefined) => {
queryParams.llmProvider = provider?.name || '' llmProvider.value = provider?.name || ''
} }
const handleRun = async () => { const handleRun = async () => {
if (!queryParams.query.trim()) { if (!query.value.trim()) {
ElMessage.warning('请输入查询 Query') ElMessage.warning('请输入查询 Query')
return return
} }
clearResults() clearResults()
if (queryParams.streamOutput && queryParams.generateResponse) { if (streamOutput.value && generateResponse.value) {
await runStreamExperiment() await runStreamExperiment()
} else { } else {
await runNormalExperiment() await runNormalExperiment()
@ -254,12 +257,12 @@ const runNormalExperiment = async () => {
loading.value = true loading.value = true
try { try {
const res: any = await runRagExperiment({ const res: any = await runRagExperiment({
query: queryParams.query, query: query.value,
kb_ids: queryParams.kbIds, kb_ids: kbIds.value,
top_k: queryParams.topK, top_k: topK.value,
score_threshold: queryParams.scoreThreshold, score_threshold: scoreThreshold.value,
llm_provider: queryParams.llmProvider || undefined, llm_provider: llmProvider.value || undefined,
generate_response: queryParams.generateResponse generate_response: generateResponse.value
}) })
retrievalResults.value = res.retrieval_results || res.retrievalResults || [] retrievalResults.value = res.retrieval_results || res.retrievalResults || []
@ -268,7 +271,7 @@ const runNormalExperiment = async () => {
diagnostics.value = res.diagnostics || null diagnostics.value = res.diagnostics || null
totalLatencyMs.value = res.total_latency_ms || res.totalLatencyMs || 0 totalLatencyMs.value = res.total_latency_ms || res.totalLatencyMs || 0
if (queryParams.generateResponse) { if (generateResponse.value) {
activeTab.value = 'ai-response' activeTab.value = 'ai-response'
} else { } else {
activeTab.value = 'retrieval' activeTab.value = 'retrieval'
@ -292,11 +295,11 @@ const runStreamExperiment = async () => {
abortStream = createSSEConnection( abortStream = createSSEConnection(
'/admin/rag/experiments/stream', '/admin/rag/experiments/stream',
{ {
query: queryParams.query, query: query.value,
kb_ids: queryParams.kbIds, kb_ids: kbIds.value,
top_k: queryParams.topK, top_k: topK.value,
score_threshold: queryParams.scoreThreshold, score_threshold: scoreThreshold.value,
llm_provider: queryParams.llmProvider || undefined, llm_provider: llmProvider.value || undefined,
generate_response: true generate_response: true
}, },
(data: string) => { (data: string) => {
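
The diff ends inside runStreamExperiment, where createSSEConnection posts the experiment payload to /admin/rag/experiments/stream, forwards each SSE data event to a callback, and returns an abort handle kept in abortStream. Its implementation is not shown in this commit; a plausible fetch-based sketch (EventSource cannot send a POST body, so the stream is parsed by hand; everything beyond the call signature is assumed):

export function createSSEConnection(
  url: string,
  payload: unknown,
  onData: (data: string) => void
): () => void {
  const controller = new AbortController()

  ;(async () => {
    const res = await fetch(url, {
      method: 'POST',
      headers: { 'Content-Type': 'application/json' },
      body: JSON.stringify(payload),
      signal: controller.signal
    })
    if (!res.ok || !res.body) throw new Error(`stream failed: ${res.status}`)

    const reader = res.body.getReader()
    const decoder = new TextDecoder()
    let buffer = ''
    for (;;) {
      const { done, value } = await reader.read()
      if (done) break
      buffer += decoder.decode(value, { stream: true })
      // SSE frames are separated by a blank line; "data:" lines carry payload.
      const frames = buffer.split('\n\n')
      buffer = frames.pop() ?? ''
      for (const frame of frames) {
        for (const line of frame.split('\n')) {
          if (line.startsWith('data:')) onData(line.slice(5).trimStart())
        }
      }
    }
  })().catch(() => {
    // swallowed: stream aborted by the caller, or a network error
  })

  // The caller keeps this handle (abortStream) to cancel an in-flight run.
  return () => controller.abort()
}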