alok94 committed
Commit da1fbe7
1 Parent(s): 4536c3f

basic working

Files changed (2)
  1. .env +2 -1
  2. app.py +41 -5
.env CHANGED
@@ -1 +1,2 @@
-OPENAI_API_KEY=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
+OPENAI_API_KEY=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
+REPLICATE_API_TOKEN=r8_4fktoXrDGkgHY8uw1XlVtQJKQlAILKv0iBmPI
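Both keys now live in .env, but app.py below still passes the Replicate token as a string literal. A minimal sketch of loading it from the environment instead, assuming python-dotenv is installed (not part of this commit; the import os added in app.py points the same way):

# Sketch: read secrets from .env at runtime instead of hardcoding them.
# Assumes python-dotenv is installed.
import os
from dotenv import load_dotenv
import replicate

load_dotenv()  # copies the .env entries into os.environ
client = replicate.Client(api_token=os.environ["REPLICATE_API_TOKEN"])

With this in place the token string can be dropped from app.py, and .env can be kept out of version control via .gitignore.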
app.py CHANGED
@@ -1,4 +1,5 @@
 import streamlit as st
+import os
 from PyPDF2 import PdfReader
 from streamlit_extras.add_vertical_space import add_vertical_space
 from langchain.text_splitter import RecursiveCharacterTextSplitter
@@ -21,16 +22,51 @@ with st.sidebar:
         length_function=len
         )
     chunks=text_splitter.split_text(text)
-    #embeddings=OpenAIEmbeddings()
-    #vectorstore=faiss.FAISS.from_texts(chunks, embedding=embeddings)
-
-
-    st.write(chunks)
+
     st.write('Made by ALOK')
+
 def main():
+    import replicate
     st.header('Talk to your file')
+    #embeddings=OpenAIEmbeddings()
+    #vectorstore=faiss.FAISS.from_texts(chunks, embedding=embeddings)
+
+    # The meta/llama-2-70b-chat model can stream output as it's running.
+    if "messages" not in st.session_state:
+        st.session_state.messages = []
+
+    # Display chat messages from history on app rerun
+    for message in st.session_state.messages:
+        with st.chat_message(message["role"]):
+            st.markdown(message["content"])
+
+    # React to user input
+    if prompt := st.chat_input("Type Here"):
+        # Display user message in chat message container
+        st.chat_message("user").markdown(prompt)
+        # Add user message to chat history
+        st.session_state.messages.append({"role": "user", "content": prompt})
+        # Bind the client to a new name instead of shadowing the replicate module
+        client = replicate.Client(api_token='r8_4fktoXrDGkgHY8uw1XlVtQJKQlAILKv0iBmPI')
+        output = client.run(
+            "meta/llama-2-70b-chat:02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3",
+            input={"prompt": prompt}
+        )
+        # run() returns an iterator of text chunks; join them into one reply.
+        # Output schema: https://replicate.com/meta/llama-2-70b-chat/versions/02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3/api#output-schema
+        res = ''
+        for item in output:
+            res += item
+            # print(item, end="") would stream each chunk to stdout instead
+        response = f"AI: {res}"
+        # Display assistant response in chat message container
+        with st.chat_message("assistant"):
+            st.markdown(response)
+        # Add assistant response to chat history
+        st.session_state.messages.append({"role": "assistant", "content": response})

 if __name__=='__main__':
     main()
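The comment in main() notes that meta/llama-2-70b-chat can stream output as it is generated, yet the loop above buffers the whole reply before rendering it. A minimal sketch of streaming into the chat window instead, using an st.empty() placeholder; the client setup mirrors the code above, and the environment lookup assumes the token was loaded as in the .env sketch:

# Sketch: render the reply token by token instead of buffering it.
import os
import streamlit as st
import replicate

client = replicate.Client(api_token=os.environ["REPLICATE_API_TOKEN"])

if prompt := st.chat_input("Type Here"):
    st.chat_message("user").markdown(prompt)
    output = client.run(
        "meta/llama-2-70b-chat:02e509c789964a7ea8736978a43525956ef40397be9033abf9fd2badfe68c9e3",
        input={"prompt": prompt},
    )
    with st.chat_message("assistant"):
        placeholder = st.empty()       # one slot, overwritten as chunks arrive
        res = ""
        for item in output:            # run() yields text chunks incrementally
            res += item
            placeholder.markdown(res)  # redraw the partial reply

Because st.empty() returns a single container that each markdown() call overwrites, the reply grows in place rather than appearing all at once; appending the finished res to st.session_state.messages afterwards keeps the history logic unchanged.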