"""Streamlit demo of MindSearch-style features.

Three independent demos on one page:
1. Dynamic query decomposition visualized as a DAG.
2. Side-by-side search-result summarization and comparison.
3. A (simulated) parallel-vs-sequential search efficiency test.
"""

import random
import time
from concurrent.futures import ThreadPoolExecutor

import matplotlib.pyplot as plt
import networkx as nx
import streamlit as st


# --- 1. Dynamic Query Decomposition and Visualization ---

def decompose_query(query):
    """Simulate MindSearch-style query decomposition.

    Returns:
        (dag, explanations): a ``networkx.DiGraph`` modelling how the query
        is broken into sub-questions, and a dict mapping each node name to
        a human-readable description.

    NOTE(review): the decomposition is static mock data — ``query`` does not
    currently influence the graph shape.
    """
    dag = nx.DiGraph()
    dag.add_edges_from([
        ("Main Query", "Sub-question 1"),
        ("Main Query", "Sub-question 2"),
        ("Sub-question 1", "Sub-question 1.1"),
        ("Sub-question 1", "Sub-question 1.2"),
        ("Sub-question 2", "Sub-question 2.1"),
    ])
    explanations = {
        "Main Query": "The main query to decompose.",
        "Sub-question 1": "First major component of the query.",
        "Sub-question 2": "Second major component of the query.",
        "Sub-question 1.1": "Detail of Sub-question 1.",
        "Sub-question 1.2": "Another aspect of Sub-question 1.",
        "Sub-question 2.1": "Detail of Sub-question 2.",
    }
    return dag, explanations


st.title("Dynamic Query Decomposition and Visualization")

# Unique widget key avoids DuplicateWidgetID errors, since several
# text inputs with similar labels coexist on this page.
query_1 = st.text_input("Enter your query for Query Decomposition:", key="query_1")

if query_1:
    st.subheader("Query Decomposition")
    dag, explanations = decompose_query(query_1)

    # Draw onto an explicit Figure: passing the global pyplot module to
    # st.pyplot() is deprecated and leaks figure state between reruns.
    fig, ax = plt.subplots(figsize=(10, 6))
    pos = nx.spring_layout(dag)
    nx.draw(dag, pos, ax=ax, with_labels=True, node_color="lightblue",
            node_size=3000, font_size=10, font_weight="bold")
    st.pyplot(fig)
    plt.close(fig)  # release the figure so reruns don't accumulate memory

    st.subheader("Explanations for Sub-queries")
    for node, explanation in explanations.items():
        st.write(f"**{node}:** {explanation}")


# --- 2. Search Result Summarization and Comparison ---

def fetch_results(query, source):
    """Return mock search results for ``query`` attributed to ``source``.

    Each result is a dict with ``title`` and ``snippet`` keys, standing in
    for a real search-API response.
    """
    return [
        {"title": f"{source} Result 1", "snippet": f"{source} snippet for {query}."},
        {"title": f"{source} Result 2", "snippet": f"{source} snippet for {query}."},
    ]


def summarize_results(results):
    """Concatenate the ``snippet`` of every result into one summary string."""
    return " ".join(result["snippet"] for result in results)


st.title("Search Result Summarization and Comparison")

query_2 = st.text_input("Enter your query for Result Summarization:", key="query_2")

if query_2:
    st.subheader("Fetching Results...")
    bing_results = fetch_results(query_2, "Bing")
    google_results = fetch_results(query_2, "Google")

    st.subheader("Results Comparison")
    col1, col2 = st.columns(2)
    with col1:
        st.write("**Bing Results:**")
        for result in bing_results:
            st.write(f"- {result['title']}: {result['snippet']}")
        st.write("**Summary:**", summarize_results(bing_results))
    with col2:
        st.write("**Google Results:**")
        for result in google_results:
            st.write(f"- {result['title']}: {result['snippet']}")
        st.write("**Summary:**", summarize_results(google_results))


# --- 3. Search Engine Efficiency Test ---

def mock_parallel_search(query):
    """Simulate one search call: sleep 0.5-1.0 s to mimic API latency,
    then return a mock count of pages retrieved (5-15)."""
    time.sleep(random.uniform(0.5, 1.0))
    return random.randint(5, 15)


st.title("Search Engine Efficiency Test")

query_3 = st.text_input("Enter your query for Efficiency Test:", key="query_3")

if query_3:
    st.subheader("Efficiency Comparison")

    # BUG FIX: the original "parallel" branch ran the three searches in a
    # plain list comprehension — i.e. sequentially — so both timings were
    # essentially identical and the comparison was meaningless. Overlap the
    # simulated latencies with a thread pool (sleep releases the GIL).
    st.write("Executing parallel search...")
    start_time = time.time()
    with ThreadPoolExecutor(max_workers=3) as pool:
        pages_retrieved = list(pool.map(mock_parallel_search, [query_3] * 3))
    parallel_time = time.time() - start_time

    st.write("Executing sequential search...")
    start_time = time.time()
    sequential_pages_retrieved = sum(mock_parallel_search(query_3) for _ in range(3))
    sequential_time = time.time() - start_time

    st.write(f"**Parallel Search:** {sum(pages_retrieved)} pages retrieved in {parallel_time:.2f} seconds.")
    st.write(f"**Sequential Search:** {sequential_pages_retrieved} pages retrieved in {sequential_time:.2f} seconds.")