cloneQ committed
Commit 0648e59 · verified · 1 parent: 3d695d6

Update app.py

Files changed (1): app.py (+10 -21)
app.py CHANGED
@@ -1,23 +1,7 @@
 import streamlit as st
 from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings
 from llama_index.embeddings.huggingface import HuggingFaceEmbedding
-from llama_index.legacy.callbacks import CallbackManager
-from llama_index.llms.openai_like import OpenAILike
-
-# Create an instance of CallbackManager
-callback_manager = CallbackManager()
-
-api_base_url = "https://internlm-chat.intern-ai.org.cn/puyu/api/v1/"
-model = "internlm2.5-latest"
-api_key = "eyJ0eXBlIjoiSldUIiwiYWxnIjoiSFM1MTIifQ.eyJqdGkiOiI1MDIxNzY4OSIsInJvbCI6IlJPTEVfUkVHSVNURVIiLCJpc3MiOiJPcGVuWExhYiIsImlhdCI6MTczMDM4MDY2MSwiY2xpZW50SWQiOiJlYm1ydm9kNnlvMG5semFlazF5cCIsInBob25lIjoiMTU3MjYzNjM5ODAiLCJ1dWlkIjoiZWFkYTY5ZDUtYjllNC00ZWM3LTk5OWUtZGY5OGQ0OGQwNjBiIiwiZW1haWwiOiIiLCJleHAiOjE3NDU5MzI2NjF9.hmpE8L-WDXmFMCnQag-rGMW1YDeU0cU9M2StKQBUKAsYUNxmodtOO_YbHoTyHBDRbRMx8rDooWCznebk9l6rWA"
-
-# api_base_url = "https://api.siliconflow.cn/v1"
-# model = "internlm/internlm2_5-7b-chat"
-# api_key = "请填写 API Key"
-
-llm =OpenAILike(model=model, api_base=api_base_url, api_key=api_key, is_chat_model=True,callback_manager=callback_manager)
-
-
+from llama_index.llms.huggingface import HuggingFaceLLM
 
 st.set_page_config(page_title="llama_index_demo", page_icon="🦜🔗")
 st.title("llama_index_demo")
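The removed block above shipped a live bearer token inside app.py. If the remote OpenAI-compatible endpoint were ever wanted again, a minimal sketch of the same `OpenAILike` setup with the secret read from the environment; the `INTERNLM_API_KEY` variable name is an assumption, everything else comes from the deleted lines:

```python
# Sketch: the removed OpenAILike setup, with the key moved out of the source.
# INTERNLM_API_KEY is an assumed variable name, not part of this commit.
import os

from llama_index.llms.openai_like import OpenAILike

llm = OpenAILike(
    model="internlm2.5-latest",
    api_base="https://internlm-chat.intern-ai.org.cn/puyu/api/v1/",
    api_key=os.environ["INTERNLM_API_KEY"],  # raises KeyError early if unset
    is_chat_model=True,
)
```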
@@ -30,10 +14,15 @@ def init_models():
     )
     Settings.embed_model = embed_model
 
-    #用初始化llm
+    llm = HuggingFaceLLM(
+        model_name="internlm/internlm2_5-1_8b-chat",
+        tokenizer_name="internlm/internlm2_5-1_8b-chat",
+        model_kwargs={"trust_remote_code": True},
+        tokenizer_kwargs={"trust_remote_code": True}
+    )
     Settings.llm = llm
 
-    documents = SimpleDirectoryReader("").load_data()
+    documents = SimpleDirectoryReader("/root/llamaindex_demo/data").load_data()
     index = VectorStoreIndex.from_documents(documents)
     query_engine = index.as_query_engine()
 
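Taken together, this hunk makes `init_models()` fully local: the remote client is replaced by a HuggingFace-hosted InternLM chat model, and the empty `SimpleDirectoryReader("")` call, which had no directory to load, now points at a real data path. A runnable sketch of the resulting function, assuming an embedding model name and an `st.cache_resource` decorator that the diff context does not show:

```python
# Sketch of the new local pipeline. HuggingFaceLLM arguments and the data path
# come from the diff; the embedding model name and caching are assumptions.
import streamlit as st
from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.huggingface import HuggingFaceLLM

@st.cache_resource
def init_models():
    # Assumed embedding model; the hunk only shows `Settings.embed_model = embed_model`
    Settings.embed_model = HuggingFaceEmbedding(
        model_name="sentence-transformers/all-MiniLM-L6-v2"
    )
    # Local chat model, as added by this commit
    Settings.llm = HuggingFaceLLM(
        model_name="internlm/internlm2_5-1_8b-chat",
        tokenizer_name="internlm/internlm2_5-1_8b-chat",
        model_kwargs={"trust_remote_code": True},
        tokenizer_kwargs={"trust_remote_code": True},
    )
    documents = SimpleDirectoryReader("/root/llamaindex_demo/data").load_data()
    return VectorStoreIndex.from_documents(documents).as_query_engine()
```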
@@ -47,10 +36,10 @@ def greet2(question):
     response = st.session_state['query_engine'].query(question)
     return response
 
-
+
 # Store LLM generated responses
 if "messages" not in st.session_state.keys():
-    st.session_state.messages = [{"role": "assistant", "content": "你好,我是你的助手,有什么我可以帮助你的吗?"}]
+    st.session_state.messages = [{"role": "assistant", "content": "你好,我是你的助手,有什么我可以帮助你的吗?"}]
 
 # Display or clear chat messages
 for message in st.session_state.messages:
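The last hunk appears to be a whitespace-only reflow: the removed and re-added lines render identically here. For context, the session-state lines it touches open Streamlit's usual chat pattern; the continuation below is a common idiom sketched from that pattern, not code shown in this diff (the greeting translates to "Hello, I'm your assistant. How can I help you?"):

```python
# Common continuation of the Streamlit chat pattern (assumed, not in the diff).
import streamlit as st

if "messages" not in st.session_state:
    st.session_state.messages = [
        {"role": "assistant", "content": "你好,我是你的助手,有什么我可以帮助你的吗?"}
    ]

# Replay the stored history
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.write(message["content"])

# Append the user's turn; the assistant reply would come from greet2()
if prompt := st.chat_input():
    st.session_state.messages.append({"role": "user", "content": prompt})
```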
 