bstraehle committed on
Commit
25d020d
1 Parent(s): 7841594

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -44,8 +44,8 @@ def invoke(openai_api_key, use_rag, prompt):
44
  result = rag_chain({"query": prompt})
45
  result = result["result"]
46
  else:
47
- chain = LLMChain(llm = llm, prompt = CHAIN_PROMPT)
48
- result = chain.run({"context": "", "question": prompt})
49
  #print(result)
50
  return result
51
 
@@ -57,7 +57,7 @@ description = """<strong>Overview:</strong> The app demonstrates how to use a La
57
  <ul style="list-style-type:square;">
58
  <li>Set "Use RAG" to "False" and submit prompt "what is gpt-4". The LLM <strong>without</strong> RAG does not know the answer.</li>
59
  <li>Set "Use RAG" to "True" and submit prompt "what is gpt-4". The LLM <strong>with</strong> RAG knows the answer.</li>
60
- <li>Experiment with different prompts, for example "what is gpt-4, answer in german" or "write a haiku about gpt-4".</li>
61
  </ul>
62
  In a production system, processing external data would be done in a batch process. An idea for a production system would be to perform LLM use cases on the
63
  <a href='https://www.youtube.com/playlist?list=PL2yQDdvlhXf_hIzmfHCdbcXj2hS52oP9r'>AWS re:Invent playlist</a>.\n\n
 
44
  result = rag_chain({"query": prompt})
45
  result = result["result"]
46
  else:
47
+ chain = LLMChain(llm = llm, prompt = prompt)
48
+ result = chain.run()
49
  #print(result)
50
  return result
51
 
 
57
  <ul style="list-style-type:square;">
58
  <li>Set "Use RAG" to "False" and submit prompt "what is gpt-4". The LLM <strong>without</strong> RAG does not know the answer.</li>
59
  <li>Set "Use RAG" to "True" and submit prompt "what is gpt-4". The LLM <strong>with</strong> RAG knows the answer.</li>
60
+ <li>Experiment with different prompts, for example "what is gpt-4, answer in german" or "write a poem about gpt-4".</li>
61
  </ul>
62
  In a production system, processing external data would be done in a batch process. An idea for a production system would be to perform LLM use cases on the
63
  <a href='https://www.youtube.com/playlist?list=PL2yQDdvlhXf_hIzmfHCdbcXj2hS52oP9r'>AWS re:Invent playlist</a>.\n\n