Update app.py
app.py CHANGED
@@ -1,7 +1,9 @@
 import gradio as gr
-import logging, os, sys, threading
+import logging, os, sys, threading
+import pandas as pd
 
 from dotenv import load_dotenv, find_dotenv
+from datasets import load_dataset
 
 lock = threading.Lock()
 
@@ -13,14 +15,6 @@ RAG_OFF = "Off"
 RAG_NAIVE = "Naive RAG"
 RAG_ADVANCED = "Advanced RAG"
 
-config = {
-    "chunk_overlap": 100, # split documents
-    "chunk_size": 2000, # split documents
-    "k": 2, # retrieve documents
-    "model_name": "gpt-4-0314", # llm
-    "temperature": 0 # llm
-}
-
 logging.basicConfig(stream = sys.stdout, level = logging.INFO)
 logging.getLogger().addHandler(logging.StreamHandler(stream = sys.stdout))
 
@@ -34,21 +28,23 @@ def invoke(openai_api_key, prompt, rag_option):
 
     with lock:
         os.environ["OPENAI_API_KEY"] = openai_api_key
-
-        # if (RAG_INGESTION):
-        #     if (rag_option == RAG_LANGCHAIN):
-        #         #rag = LangChainRAG()
-        #         #rag.ingestion(config)
-        #     elif (rag_option == RAG_LLAMAINDEX):
-        #         #rag = LlamaIndexRAG()
-        #         #rag.ingestion(config)
-
-        completion = ""
-        result = ""
-        callback = ""
-        err_msg = ""
 
+        ###
+        dataset = load_dataset("MongoDB/airbnb_embeddings", streaming=True, split="train")
+        dataset = dataset.take(100)
+        dataset_df = pd.DataFrame(dataset)
+        dataset_df.head(5)
+        ###
+
         """
+        if (RAG_INGESTION):
+            if (rag_option == RAG_LANGCHAIN):
+                #rag = LangChainRAG()
+                #rag.ingestion(config)
+            elif (rag_option == RAG_LLAMAINDEX):
+                #rag = LlamaIndexRAG()
+                #rag.ingestion(config)
+
         try:
             #rag = LangChainRAG()
             #completion, callback = rag.rag_chain(config, prompt)
@@ -68,24 +64,18 @@ def invoke(openai_api_key, prompt, rag_option):
             del os.environ["OPENAI_API_KEY"]
         """
 
-        return ""
+        return "TODO"
 
 gr.close_all()
 
 demo = gr.Interface(
     fn = invoke,
     inputs = [gr.Textbox(label = "OpenAI API Key", type = "password", lines = 1),
-              gr.Textbox(label = "Prompt", value = "
+              gr.Textbox(label = "Prompt", value = "TODO", lines = 1),
               gr.Radio([RAG_OFF, RAG_NAIVE, RAG_ADVANCED], label = "Retrieval-Augmented Generation", value = RAG_ADVANCED)],
     outputs = [gr.Textbox(label = "Completion")],
     title = "Context-Aware Reasoning Application",
-    description = os.environ["DESCRIPTION"],
-    examples = [["sk-<BringYourOwn>", "What are GPT-4's media capabilities in 5 emojis and 1 sentence?", RAG_ADVANCED],
-                ["sk-<BringYourOwn>", "List GPT-4's exam scores and benchmark results.", RAG_ADVANCED],
-                ["sk-<BringYourOwn>", "Compare GPT-4 to GPT-3.5 in markdown table format.", RAG_ADVANCED],
-                ["sk-<BringYourOwn>", "Write a Python program that calls the GPT-4 API.", RAG_ADVANCED],
-                ["sk-<BringYourOwn>", "What is the GPT-4 API's cost and rate limit? Answer in English, Arabic, Chinese, Hindi, and Russian in JSON format.", RAG_ADVANCED]],
-    cache_examples = False
+    description = os.environ["DESCRIPTION"]
 )
 
 demo.launch()
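For reference, the ingestion block introduced by this commit can be exercised on its own. Below is a minimal standalone sketch, assuming only that the datasets and pandas packages are installed and that the MongoDB/airbnb_embeddings dataset is reachable on the Hugging Face Hub; it mirrors the committed lines rather than defining the Space's final behavior.

import pandas as pd
from datasets import load_dataset

# Stream the dataset so the full corpus is never downloaded up front,
# then keep only the first 100 records.
dataset = load_dataset("MongoDB/airbnb_embeddings", streaming=True, split="train")
sample = dataset.take(100)           # IterableDataset limited to 100 examples

# pandas builds the frame from the iterable of example dicts.
dataset_df = pd.DataFrame(sample)
print(dataset_df.head(5))            # sanity-check the first rows

With streaming=True, load_dataset returns an IterableDataset, so take(100) bounds both download time and memory. Note that the committed dataset_df.head(5) call has no visible effect unless its result is logged or returned.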