hayuh committed on
Commit
739f9ff
·
verified ·
1 Parent(s): 982331e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +131 -3
app.py CHANGED
@@ -1,8 +1,97 @@
 
 
 
1
  import gradio as gr
2
- from huggingface_hub import InferenceClient
 
3
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
def ask_agent(question):
    """Placeholder handler: ignore *question* and return a fixed greeting.

    Args:
        question: The user's question text (unused in this stub).

    Returns:
        The constant string "Hello".
    """
    # The original wrapped a string literal in str(), which is a no-op;
    # return the literal directly.
    return "Hello"
 
6
 
7
  # Create the Gradio interface
8
  iface = gr.Interface(
@@ -13,4 +102,43 @@ iface = gr.Interface(
13
  )
14
 
15
  # Launch the Gradio app
16
- iface.launch(share=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import glob
3
+ from pathlib import Path
4
  import gradio as gr
5
+ import nest_asyncio
6
+ import dill as pickle
7
 
8
# Ensure async compatibility in Jupyter: nest_asyncio lets an event loop
# be re-entered, which llama_index's sync wrappers need inside notebooks.
nest_asyncio.apply()

# Import OpenAI key with helper function.
# NOTE(review): OPENAI_API_KEY is never referenced again in this file —
# presumably get_openai_api_key() also sets the env var the OpenAI client
# reads; confirm against helper.py.
from helper import get_openai_api_key
OPENAI_API_KEY = get_openai_api_key()

# Define the path to the directory containing the PDF files.
# NOTE(review): relative path — resolution depends on the process's working
# directory at launch; confirm the deployment cwd contains this folder.
folder_path = 'Ehlers-Danlos-1'

# Get the list of all PDF files in the directory (non-recursive glob).
pdf_files = glob.glob(os.path.join(folder_path, '*.pdf'))
print(pdf_files)

# Extract just the filenames (optional; used only for the debug print below).
pdf_filenames = [os.path.basename(pdf) for pdf in pdf_files]
print(pdf_filenames)

# Import utilities: get_doc_tools builds per-document vector/summary tools.
from utils import get_doc_tools
28
+
29
+ # Truncate function names if necessary
30
def truncate_function_name(name, max_length=64):
    """Clamp *name* to at most *max_length* characters.

    Args:
        name: The function name to clamp.
        max_length: Maximum allowed length (default 64).

    Returns:
        *name* unchanged when it fits, otherwise its first *max_length* chars.
    """
    # Slicing is already a no-op for strings within the limit, so no
    # explicit length check is needed.
    return name[:max_length]
32
+
33
+ # Path to save/load serialized tools
34
# Path to save/load serialized tools (dill-pickled dict, see `pickle` alias).
tools_cache_path = 'tools_cache.pkl'

# Initialize paper_to_tools_dict: maps each PDF path to its
# [vector_tool, summary_tool] pair.
paper_to_tools_dict = {}

# Check if the cache file exists and is not empty before attempting a load.
# NOTE(review): unpickling executes arbitrary code — this is only safe while
# tools_cache.pkl is a trusted local artifact written by this app itself.
if os.path.exists(tools_cache_path) and os.path.getsize(tools_cache_path) > 0:
    try:
        with open(tools_cache_path, 'rb') as f:
            paper_to_tools_dict = pickle.load(f)
    # Only truncation (EOFError) is handled; other corruption (e.g.
    # UnpicklingError) would propagate — TODO confirm that is acceptable.
    except EOFError:
        print("Cache file is corrupted. Recreating tools.")
        paper_to_tools_dict = {}
else:
    print("Cache file does not exist or is empty. Recreating tools.")

# Create tools for each PDF if not loaded from cache.
# NOTE(review): a stale cache is never invalidated — adding/removing PDFs in
# the folder after the cache exists will raise KeyError in the all_tools
# comprehension below; consider keying the cache on the file list.
if not paper_to_tools_dict:
    for pdf in pdf_files:
        print(f"Getting tools for paper: {pdf}")
        # Path(pdf).stem gives the filename without extension, used as the
        # tool-name base by get_doc_tools.
        vector_tool, summary_tool = get_doc_tools(pdf, Path(pdf).stem)
        paper_to_tools_dict[pdf] = [vector_tool, summary_tool]

    # Save tools to cache so subsequent launches skip the expensive build.
    with open(tools_cache_path, 'wb') as f:
        pickle.dump(paper_to_tools_dict, f)


# Combine all tools into a single flat list (two tools per PDF).
all_tools = [t for pdf in pdf_files for t in paper_to_tools_dict[pdf]]
64
+
65
+ # Define an object index and retriever over these tools
66
# Define an object index and retriever over these tools, so the agent can
# retrieve the most relevant tools per query instead of seeing all of them.
from llama_index.core import VectorStoreIndex
from llama_index.core.objects import ObjectIndex

obj_index = ObjectIndex.from_objects(
    all_tools,
    index_cls=VectorStoreIndex,
)

# Retrieve at most the 3 tools most similar to each incoming query.
obj_retriever = obj_index.as_retriever(similarity_top_k=3)

# Initialize the OpenAI LLM used by the agent for reasoning and tool calls.
from llama_index.llms.openai import OpenAI
llm = OpenAI(model="gpt-3.5-turbo")

# Set up the agent: a function-calling worker that pulls tools via the
# retriever, wrapped in an AgentRunner that manages the task/chat loop.
from llama_index.core.agent import FunctionCallingAgentWorker
from llama_index.core.agent import AgentRunner

agent_worker = FunctionCallingAgentWorker.from_tools(
    tool_retriever=obj_retriever,
    llm=llm,
    verbose=True  # log tool selection/calls to stdout for debugging
)
agent = AgentRunner(agent_worker)
90
+
91
+ # Define the function to query the agent
92
def ask_agent(question):
    """Forward *question* to the document agent and return its answer as text.

    Args:
        question: Free-text question from the Gradio input box.

    Returns:
        The agent's response, coerced to ``str`` for display.
    """
    return str(agent.query(question))
95
 
96
  # Create the Gradio interface
97
  iface = gr.Interface(
 
102
  )
103
 
104
# Launch the Gradio app.
# NOTE(review): share=True asks Gradio to open a public tunnel URL —
# confirm this exposure is intended in the deployment environment.
iface.launch(share=True)
106
+
107
+ """
108
+ import streamlit as st
109
+ from transformers import pipeline
110
+
111
+ # Load your model
112
+ generator = pipeline('text-generation', model='gpt-3.5-turbo')
113
+
114
+ # Streamlit interface
115
+ st.title("Text Generator")
116
+ prompt = st.text_input("Enter your prompt:")
117
+ if st.button("Generate"):
118
+ result = generator(prompt, max_length=50)
119
+ st.write(result[0]['generated_text'])
120
+
121
+ """
122
+
123
+ """
124
+ import gradio as gr
125
+ from transformers import pipeline
126
+
127
+ # Load your model
128
+ generator = pipeline('text-generation', model='gpt-3.5-turbo')
129
+
130
+ # Define the function to generate text
131
+ def generate_text(prompt):
132
+ result = generator(prompt, max_length=50)
133
+ return result[0]['generated_text']
134
+
135
+ # Create the Gradio interface
136
+ iface = gr.Interface(fn=generate_text, inputs="text", outputs="text", title="Text Generator")
137
+
138
+ # Launch the interface
139
+ iface.launch()
140
+ """
141
+ """
142
+ import torch
143
+ print(torch.__version__)
144
+ """