yyyycc committed
Commit 3675339 · verified · 1 Parent(s): 59fc28f

Update app.py

Files changed (1): app.py (+20, -17)
app.py CHANGED
@@ -1,14 +1,25 @@
 import streamlit as st
-import os
-from llama_index.core import VectorStoreIndex, Settings
+from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, Settings
 from llama_index.embeddings.huggingface import HuggingFaceEmbedding
 from llama_index.legacy.callbacks import CallbackManager
 from llama_index.llms.openai_like import OpenAILike
 
+# Create an instance of CallbackManager
+callback_manager = CallbackManager()
+
+api_base_url = "https://internlm-chat.intern-ai.org.cn/puyu/api/v1/"
+model = "internlm2.5-latest"
+api_key = os.environ['API_KEY']
+
+
+llm = OpenAILike(model=model, api_base=api_base_url, api_key=api_key, is_chat_model=True, callback_manager=callback_manager)
+
+
+
 st.set_page_config(page_title="llama_index_demo", page_icon="🦜🔗")
 st.title("llama_index_demo")
 
-# Initialize models
+# Initialize models
 @st.cache_resource
 def init_models():
     embed_model = HuggingFaceEmbedding(
@@ -16,12 +27,7 @@ def init_models():
     )
     Settings.embed_model = embed_model
 
-    llm = HuggingFaceLLM(
-        model_name="internlm/internlm2-chat-1_8b",
-        tokenizer_name="internlm/internlm2-chat-1_8b",
-        model_kwargs={"trust_remote_code": True},
-        tokenizer_kwargs={"trust_remote_code": True}
-    )
+    # Use the llm initialized above
     Settings.llm = llm
 
     documents = SimpleDirectoryReader("./").load_data()
@@ -30,23 +36,20 @@ def init_models():
 
     return query_engine
 
-
-# Check if models need initialization
+# Check if models need initialization
 if 'query_engine' not in st.session_state:
     st.session_state['query_engine'] = init_models()
-
+
 def greet2(question):
-    if st.session_state['query_engine'] is None:
-        return "The models failed to initialize, please check your environment"
     response = st.session_state['query_engine'].query(question)
     return response
 
-
+
 # Store LLM generated responses
 if "messages" not in st.session_state.keys():
-    st.session_state.messages = [{"role": "assistant", "content": "Hello, I am your assistant. How can I help you?"}]
+    st.session_state.messages = [{"role": "assistant", "content": "Hello, I am your assistant. How can I help you?"}]
 
-# Display or clear chat messages
+# Display or clear chat messages
 for message in st.session_state.messages:
     with st.chat_message(message["role"]):
         st.write(message["content"])
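
For reference, here is a condensed sketch of the pipeline this commit ends up with: a remote internlm2.5-latest model reached through OpenAILike, a local HuggingFace embedding model, and a VectorStoreIndex query engine over the documents in the working directory. It is a sketch under stated assumptions, not the repository's exact file: the diff never shows the embedding model name or the index-construction lines, so the model_name value and the VectorStoreIndex.from_documents / as_query_engine pair below are placeholders for the elided code; import os is included because the new module-level code reads os.environ['API_KEY']; and CallbackManager is imported from llama_index.core rather than llama_index.legacy so that it matches the core-based OpenAILike.

import os

from llama_index.core import Settings, SimpleDirectoryReader, VectorStoreIndex
from llama_index.core.callbacks import CallbackManager  # core import (the app uses the legacy one)
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.openai_like import OpenAILike

# Remote InternLM endpoint, configured with the same values as the commit.
llm = OpenAILike(
    model="internlm2.5-latest",
    api_base="https://internlm-chat.intern-ai.org.cn/puyu/api/v1/",
    api_key=os.environ["API_KEY"],  # same env var the commit reads; requires `import os`
    is_chat_model=True,
    callback_manager=CallbackManager(),
)

def build_query_engine():
    # Placeholder embedding model: the actual model_name is elided in the diff.
    Settings.embed_model = HuggingFaceEmbedding(
        model_name="sentence-transformers/all-MiniLM-L6-v2"
    )
    Settings.llm = llm
    # Index every document in the working directory, as SimpleDirectoryReader("./") does in the app.
    documents = SimpleDirectoryReader("./").load_data()
    index = VectorStoreIndex.from_documents(documents)  # standard pattern; elided in the diff
    return index.as_query_engine()

if __name__ == "__main__":
    engine = build_query_engine()
    print(engine.query("What do the indexed documents cover?"))  # sample question

In the Streamlit app itself the same flow runs behind @st.cache_resource, so init_models() builds the engine once per process and greet2() routes each chat message through it; the process needs API_KEY exported in its environment before streamlit run app.py.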