alok94 committed
Commit 245705d • 1 Parent(s): 9b8a50b

change in prompt

Files changed (2)
  1. .gitignore +1 -0
  2. app.py +44 -15
.gitignore ADDED
@@ -0,0 +1 @@
+.env
app.py CHANGED
@@ -1,12 +1,21 @@
 import streamlit as st
 import os
 import time
+from dotenv import load_dotenv
+from getpass import getpass
+from langchain.llms import replicate
+from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
+from langchain.prompts import PromptTemplate
+
 from PyPDF2 import PdfReader
 from streamlit_extras.add_vertical_space import add_vertical_space
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 #from langchain.embeddings.openai import OpenAIEmbeddings
 from langchain.vectorstores import faiss
 
+load_dotenv()
+REPLICATE_API_TOKEN = os.environ.get("REPLICATE_API_TOKEN")
+
 with st.sidebar:
     st.title("File Research using LLM")
     st.markdown(''' Upload your file and ask questions and do Research''')
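The new imports pair with the .env entry added to .gitignore: the Replicate token now comes from the environment instead of being hard-coded. A minimal sketch of that loading pattern, assuming python-dotenv is installed and a .env file next to app.py defines REPLICATE_API_TOKEN (the missing-token guard is an illustrative addition, not part of the commit):

import os
from dotenv import load_dotenv

load_dotenv()  # copies key=value pairs from .env into the process environment
token = os.environ.get("REPLICATE_API_TOKEN")
if token is None:
    # fail early rather than passing None to the Replicate client later
    raise RuntimeError("REPLICATE_API_TOKEN is not set; add it to .env")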
@@ -23,19 +32,21 @@ with st.sidebar:
         length_function=len
         )
     chunks=text_splitter.split_text(text)
+
+
 
 
     st.write('Made by ALOK')
 
 
 def main():
-    import replicate
     st.header('Talk to your file')
+    os.environ["REPLICATE_API_TOKEN"]=REPLICATE_API_TOKEN
     #embeddings=OpenAIEmbeddings()
     #vectorstore=faiss.FAISS.from_texts(chunks, embedding=embeddings)
 
     # The meta/llama-2-70b-chat model can stream output as it's running.
-
+
 
     if "messages" not in st.session_state:
         st.session_state.messages = []
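For context, the chunks produced at the top of this hunk come from LangChain's RecursiveCharacterTextSplitter. A sketch of the full splitter call, where chunk_size and chunk_overlap are illustrative values since the diff only shows length_function=len:

from langchain.text_splitter import RecursiveCharacterTextSplitter

text = "..."  # full text extracted from the uploaded PDF with PdfReader
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000,      # assumed: max characters per chunk
    chunk_overlap=200,    # assumed: overlap between neighbouring chunks
    length_function=len,  # measure size in characters, as in the diff
)
chunks = text_splitter.split_text(text)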
@@ -51,28 +62,46 @@ def main():
         st.chat_message("user").markdown(prompt)
         # Add user message to chat history
         st.session_state.messages.append({"role": "user", "content": prompt})
-        replicate = replicate.Client(api_token='r8_4fktoXrDGkgHY8uw1XlVtQJKQlAILKv0iBmPI')
-        output = replicate.run(
-            "meta/llama-2-70b-chat:02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3",
-            input={"prompt": prompt}
+        replite_api='r8_4fktoXrDGkgHY8uw1XlVtQJKQlAILKv0iBmPI'
+
+
+        # rep = replicate.Client(api_token=replite_api)
+        # output = replicate.run(
+        #     "meta/llama-2-70b-chat:02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3",
+        #     input={"prompt": prompt}
+        # )
+
+        model="meta/llama-2-70b-chat:02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3"
+        llm=replicate.Replicate(
+            streaming=True,
+            callbacks=[StreamingStdOutCallbackHandler()],
+            model=model,
+            model_kwargs={"temperature": 0.75, "max_length": 500, "top_p": 1},
+            replicate_api_token=REPLICATE_API_TOKEN
         )
+        prompt = """
+        User: Answer the following yes/no question by reasoning step by step. Please don't provide incomplete answer. Can a dog drive a car?
+        Assistant:
+        """
+
 
         # Display assistant response in chat message container
         with st.chat_message("assistant"):
             message_placeholder = st.empty()
+            message_placeholder.markdown(llm(prompt) + "▌")
 
-            # The predict method returns an iterator, and you can iterate over that output.
-            response_till_now=''
-            for item in output:
-                response_till_now+=item
-                time.sleep(0.03)
-                message_placeholder.markdown(response_till_now + "▌")
-            message_placeholder.markdown(response_till_now)
-            response = f"AI: {response_till_now}"
+            # # The predict method returns an iterator, and you can iterate over that output.
+            # response_till_now=''
+            # for item in output:
+            #     response_till_now+=item
+            #     time.sleep(0.03)
+            #     message_placeholder.markdown(response_till_now + "▌")
+            # message_placeholder.markdown(response_till_now)
+            # response = f"AI: {response_till_now}"
 
 
         # Add assistant response to chat history
-        st.session_state.messages.append({"role": "assistant", "content": response})
+        # st.session_state.messages.append({"role": "assistant", "content": response})
         # https://replicate.com/meta/llama-2-70b-chat/versions/02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3/api#output-schema
         #print(item, end="")
 
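The hunk above swaps the raw replicate client for LangChain's Replicate LLM wrapper, but then calls it with a hard-coded yes/no prompt rather than the user's chat input. A sketch of driving the same LLM from the user's message instead; the User:/Assistant: framing mirrors the template in the diff, and the answer helper is illustrative, not from the commit:

from langchain.llms import Replicate

llm = Replicate(
    model="meta/llama-2-70b-chat:02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3",
    model_kwargs={"temperature": 0.75, "max_length": 500, "top_p": 1},
    # with no explicit token, LangChain falls back to the REPLICATE_API_TOKEN env var
)

def answer(user_text: str) -> str:
    # Llama-2 chat checkpoints expect a dialogue-style prompt
    return llm(f"User: {user_text}\nAssistant:")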
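One caveat with the new streaming setup: StreamingStdOutCallbackHandler prints tokens to the server console, while the Streamlit placeholder only receives the finished string from llm(prompt). A sketch of a callback that streams into the st.empty() placeholder instead, in the spirit of the commented-out iterator loop; the class name is illustrative:

from langchain.callbacks.base import BaseCallbackHandler

class PlaceholderStreamHandler(BaseCallbackHandler):
    def __init__(self, placeholder):
        self.placeholder = placeholder
        self.text = ""

    def on_llm_new_token(self, token: str, **kwargs) -> None:
        # append each streamed token and redraw with the block cursor
        self.text += token
        self.placeholder.markdown(self.text + "▌")

Usage would mirror the diff: build the LLM with streaming=True and callbacks=[PlaceholderStreamHandler(message_placeholder)], call llm(prompt), then write the final text with message_placeholder.markdown(...) to drop the cursor.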