eagle0504 committed on
Commit
bdb22b7
1 Parent(s): 11effaf

Update app.py

Files changed (1)
  1. app.py +26 -1
app.py CHANGED
@@ -5,6 +5,7 @@ import streamlit as st
 from langchain.agents import AgentType, initialize_agent, load_tools
 from langchain.llms import OpenAI as l_OpenAI
 from transformers import pipeline
+from transformers import AutoTokenizer, AutoModelForCausalLM
 
 from helpers.foundation_models import *
 
@@ -13,6 +14,26 @@ SERPAPI_API_KEY = os.environ["SERPAPI_API_KEY"]
 openai_client = openai.OpenAI(api_key=OPENAI_API_KEY)
 
 
+tokenizer = AutoTokenizer.from_pretrained("eagle0504/llama-2-7b-miniguanaco")
+model = AutoModelForCausalLM.from_pretrained("eagle0504/llama-2-7b-miniguanaco")
+
+
+def generate_response_from_llama2(query):
+
+    # Tokenize the input text
+    input_ids = tokenizer.encode(query, return_tensors="pt")
+
+    # Generate a response
+    # Adjust parameters like max_length according to your needs
+    output = model.generate(input_ids, max_length=50, num_return_sequences=1, temperature=0.7)
+
+    # Decode the output to human-readable text
+    generated_text = tokenizer.decode(output[0], skip_special_tokens=True)
+
+    # Return the decoded text
+    return generated_text
+
+
 # Initialize chat history
 if "messages" not in st.session_state:
     st.session_state.messages = []
@@ -38,7 +59,7 @@ with st.expander("Instructions"):
 
 option = st.sidebar.selectbox(
     "Which task do you want to do?",
-    ("Sentiment Analysis", "Medical Summarization", "ChatGPT", "ChatGPT (with Google)"),
+    ("Sentiment Analysis", "Medical Summarization", "Llama2", "ChatGPT", "ChatGPT (with Google)"),
 )
 
 
@@ -78,6 +99,10 @@ if prompt := st.chat_input("What is up?"):
         if prompt:
             out = pipe_summarization(prompt)
             doc = out[0]["summary_text"]
+    elif option == "Llama2":
+        if prompt:
+            out = generate_response_from_llama2(query=prompt)
+            doc = out
     elif option == "ChatGPT":
         if prompt:
             out = call_chatgpt(query=prompt)
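A note on the generation call added above: in Hugging Face transformers, temperature only takes effect when sampling is enabled via do_sample=True (otherwise generate decodes greedily and ignores it), and max_length counts the prompt tokens together with the newly generated ones. A minimal sketch of a sampling-enabled variant; the parameter values here are illustrative assumptions, not part of this commit:

# Sketch only: a sampling-enabled variant of the commit's generate call.
# max_new_tokens and do_sample are assumptions, not values from the commit.
output = model.generate(
    input_ids,
    max_new_tokens=50,      # budget for generated tokens only, prompt excluded
    do_sample=True,         # required for temperature to take effect
    temperature=0.7,
    num_return_sequences=1,
)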
 
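To exercise the new Llama2 branch outside the Streamlit UI, the helper can be called directly. A sketch, assuming the eagle0504/llama-2-7b-miniguanaco weights download successfully; a 7B checkpoint in full precision needs tens of GB of memory, so passing torch_dtype=torch.float16 to from_pretrained is a common way to roughly halve that:

# Sketch: quick manual check of the new helper (the prompt is an arbitrary example).
# Assumes the module-level tokenizer/model in app.py loaded without error.
reply = generate_response_from_llama2("What does this app do?")
print(reply)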