Update app.py
app.py
CHANGED
@@ -41,11 +41,11 @@ YOUTUBE_URL_3 = "https://www.youtube.com/watch?v=vw-KWfKwvTQ"
 MODEL_NAME = "gpt-4"
 
 def invoke(openai_api_key, use_rag, prompt):
-
-
-
-
-
+    try:
+        llm = ChatOpenAI(model_name = MODEL_NAME,
+                         openai_api_key = openai_api_key,
+                         temperature = 0)
+        if (use_rag):
             # Document loading
             #docs = []
             # Load PDF
@@ -77,11 +77,11 @@ def invoke(openai_api_key, use_rag, prompt):
                                                     return_source_documents = True)
             result = rag_chain({"query": prompt})
             result = result["result"]
-
-
-
-
-
+        else:
+            chain = LLMChain(llm = llm, prompt = LLM_CHAIN_PROMPT)
+            result = chain.run({"question": prompt})
+    except Exception as e:
+        raise gr.Error(e)
     return result
 
 description = """<strong>Overview:</strong> Reasoning application that demonstrates a <strong>Large Language Model (LLM)</strong> with
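Read together, the two hunks wrap the body of invoke in a single try/except, create the ChatOpenAI model up front, and branch on use_rag between the retrieval chain and a plain LLMChain, surfacing any failure as a Gradio error. Below is a minimal sketch of how the function reads after this change; it assumes the classic LangChain imports (ChatOpenAI, LLMChain, RetrievalQA) and the Gradio import used elsewhere in app.py, a placeholder LLM_CHAIN_PROMPT, and a hypothetical build_retriever() helper standing in for the document-loading and vector-store code elided between the hunks. It is not the exact file contents.

# Sketch only; names marked as placeholders are not from the diff.
import gradio as gr
from langchain.chat_models import ChatOpenAI
from langchain.chains import LLMChain, RetrievalQA
from langchain.prompts import PromptTemplate

MODEL_NAME = "gpt-4"

# Placeholder prompt; the real LLM_CHAIN_PROMPT is defined elsewhere in app.py.
LLM_CHAIN_PROMPT = PromptTemplate(input_variables = ["question"],
                                  template = "Answer the question: {question}")

def build_retriever(openai_api_key):
    # Placeholder: in app.py this is where the PDF, web, and YouTube documents are
    # loaded, split, embedded, and stored; the diff does not show that code.
    raise NotImplementedError("retriever construction elided in the diff")

def invoke(openai_api_key, use_rag, prompt):
    try:
        llm = ChatOpenAI(model_name = MODEL_NAME,
                         openai_api_key = openai_api_key,
                         temperature = 0)
        if (use_rag):
            # Retrieval-augmented path: answer from the indexed documents.
            rag_chain = RetrievalQA.from_chain_type(llm,
                                                    retriever = build_retriever(openai_api_key),
                                                    return_source_documents = True)
            result = rag_chain({"query": prompt})
            result = result["result"]
        else:
            # Plain LLM path: no retrieval, just the prompt template.
            chain = LLMChain(llm = llm, prompt = LLM_CHAIN_PROMPT)
            result = chain.run({"question": prompt})
    except Exception as e:
        # Surface the failure in the Gradio UI instead of a stack trace.
        raise gr.Error(e)
    return result

Elsewhere in app.py (not shown in this diff) invoke is presumably wired into a gr.Interface, so raising gr.Error turns an OpenAI or chain failure into an error message in the UI rather than a crashed request.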