brianjking committed on
Commit
3b8cc91
1 Parent(s): f9fdaeb

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -8
app.py CHANGED
@@ -6,7 +6,7 @@ from llama_index import (
6
  VectorStoreIndex,
7
  )
8
  from llama_index.llms import OpenAI
9
- import openai
10
 
11
  # Define Streamlit layout and interaction
12
  st.title("Grounded Generations")
@@ -25,7 +25,7 @@ def load_data(uploaded_file):
25
  docs = reader.load_data()
26
  service_context = ServiceContext.from_defaults(
27
  llm=OpenAI(
28
- model="gpt-3.5-turbo-16k",
29
  temperature=0.1,
30
  ),
31
  system_prompt="You are an AI assistant that uses context from a PDF to assist the user in generating text."
@@ -48,12 +48,12 @@ if 'retrieved_text' not in st.session_state:
48
  if st.button("Retrieve"):
49
  with st.spinner('Retrieving text...'):
50
  # Use VectorStoreIndex to search
51
- query_engine = index.as_query_engine(similarity_top_k=3)
52
  st.session_state['retrieved_text'] = query_engine.query(user_query)
53
- st.write(f"Retrieved Text: {st.session_state['retrieved_text']}") # store the retrieved text as a streamlit state variable
54
 
55
  # Select content type
56
- content_type = st.selectbox("Select content type:", ["Blog", "Tweet"]) # make some default nonsense
57
 
58
  # Generate text based on retrieved text and selected content type
59
  if st.button("Generate") and content_type:
@@ -62,17 +62,17 @@ if st.button("Generate") and content_type:
62
  openai.api_key = os.getenv("OPENAI_API_KEY")
63
  try:
64
  if content_type == "Blog":
65
- prompt = f"Write a blog about 500 words in length using the {st.session_state['retrieved_text']}" #uses content from the stored state
66
  elif content_type == "Tweet":
67
  prompt = f"Compose a tweet using the {st.session_state['retrieved_text']}"
68
  response = openai.ChatCompletion.create(
69
- model="gpt-3.5-turbo-16k",
70
  messages=[
71
  {"role": "system", "content": "You are a helpful assistant."},
72
  {"role": "user", "content": prompt}
73
  ]
74
  )
75
- generated_text = response['choices'][0]['message']['content']
76
  st.write(f"Generated Text: {generated_text}")
77
  except Exception as e:
78
  st.write(f"An error occurred: {e}")
 
6
  VectorStoreIndex,
7
  )
8
  from llama_index.llms import OpenAI
9
+ from openai import OpenAI
10
 
11
  # Define Streamlit layout and interaction
12
  st.title("Grounded Generations")
 
25
  docs = reader.load_data()
26
  service_context = ServiceContext.from_defaults(
27
  llm=OpenAI(
28
+ model="gpt-3.5-turbo-0125", # Updated model
29
  temperature=0.1,
30
  ),
31
  system_prompt="You are an AI assistant that uses context from a PDF to assist the user in generating text."
 
48
  if st.button("Retrieve"):
49
  with st.spinner('Retrieving text...'):
50
  # Use VectorStoreIndex to search
51
+ query_engine = index.as_query_engine(similarity_top_k=3)
52
  st.session_state['retrieved_text'] = query_engine.query(user_query)
53
+ st.write(f"Retrieved Text: {st.session_state['retrieved_text']}")
54
 
55
  # Select content type
56
+ content_type = st.selectbox("Select content type:", ["Blog", "Tweet"])
57
 
58
  # Generate text based on retrieved text and selected content type
59
  if st.button("Generate") and content_type:
 
62
  openai.api_key = os.getenv("OPENAI_API_KEY")
63
  try:
64
  if content_type == "Blog":
65
+ prompt = f"Write a blog about 500 words in length using the {st.session_state['retrieved_text']}"
66
  elif content_type == "Tweet":
67
  prompt = f"Compose a tweet using the {st.session_state['retrieved_text']}"
68
  response = openai.ChatCompletion.create(
69
+ model="gpt-3.5-turbo-0125", # Updated model
70
  messages=[
71
  {"role": "system", "content": "You are a helpful assistant."},
72
  {"role": "user", "content": prompt}
73
  ]
74
  )
75
+ generated_text = response.choices[0].message.content
76
  st.write(f"Generated Text: {generated_text}")
77
  except Exception as e:
78
  st.write(f"An error occurred: {e}")