Commit 123ba7e · Parent(s): df95248
feat: include a temporary LLM class

Files changed:
- app_langchain.py (+6 -124)
- src/dev_llm.py (+23 -0)

app_langchain.py (CHANGED)

````diff
@@ -1,13 +1,10 @@
 import streamlit as st
-import yaml
-import requests
-import re
 import os
 
 from langchain_core.prompts import PromptTemplate
-import streamlit as st
 
-from src.
+from src.utils import load_config_values
+from src.dev_llm import FakeLLM
 
 # Get HuggingFace API key
 api_key_name = "HUGGINGFACE_HUB_TOKEN"
@@ -15,127 +12,12 @@ api_key = os.getenv(api_key_name)
 if api_key is None:
     st.error(f"Failed to read `{api_key_name}`. Ensure the token is correctly located")
 
-# Load in model
-
-config_keys = ["system_message", "model_id", "template"]
-
-with open(model_config_dir, "r") as file:
-    model_config = yaml.safe_load(file)
-
-for var in model_config.keys():
-    if var not in config_keys:
-        raise ValueError(f"`{var}` key missing from `{model_config_dir}`")
-
-system_message = model_config["system_message"]
-model_id = model_config["model_id"]
-template = model_config["template"]
+# Load in model and pipeline configuration values
+system_message, model_id, template = load_config_values()
 
-
+prompt = PromptTemplate(
     template=template,
     input_variables=["system_message", "user_message"]
 )
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-def query(payload, model_id):
-    headers = {"Authorization": f"Bearer {api_key}"}
-    API_URL = f"https://api-inference.huggingface.co/models/{model_id}"
-    response = requests.post(API_URL, headers=headers, json=payload)
-    return response.json()
-
-
-def prompt_generator(system_message, user_message):
-    return f"""
-    <s>[INST] <<SYS>>
-    {system_message}
-    <</SYS>>
-    {user_message} [/INST]
-    """
-
-
-# Pattern to clean up text response from API
-pattern = r".*\[/INST\]([\s\S]*)$"
-
-# Initialize chat history
-if "messages" not in st.session_state:
-    st.session_state.messages = []
-
-# Include PDF upload ability
-pdf_upload = st.file_uploader(
-    "Upload a .PDF here",
-    type=".pdf",
-)
-
-if pdf_upload is not None:
-    pdf_text = get_pdf_text(pdf_upload)
-
-
-if "key_inputs" not in st.session_state:
-    st.session_state.key_inputs = {}
-
-col1, col2, col3 = st.columns([3, 3, 2])
-
-with col1:
-    key_name = st.text_input("Key/Column Name (e.g. patient_name)", key="key_name")
-
-with col2:
-    key_description = st.text_area(
-        "*(Optional) Description of key/column", key="key_description"
-    )
-
-with col3:
-    if st.button("Extract this column"):
-        if key_description:
-            st.session_state.key_inputs[key_name] = key_description
-        else:
-            st.session_state.key_inputs[key_name] = "No further description provided"
-
-if st.session_state.key_inputs:
-    keys_title = st.write("\nKeys/Columns for extraction:")
-    keys_values = st.write(st.session_state.key_inputs)
-
-with st.spinner("Extracting requested data"):
-    if st.button("Extract data!"):
-        user_message = f"""
-        Use the text provided and denoted by 3 backticks ```{pdf_text}```.
-        Extract the following columns and return a table that could be uploaded to an SQL database.
-        {'; '.join([key + ': ' + st.session_state.key_inputs[key] for key in st.session_state.key_inputs])}
-        """
-        the_prompt = prompt_generator(
-            system_message=system_message, user_message=user_message
-        )
-        response = query(
-            {
-                "inputs": the_prompt,
-                "parameters": {"max_new_tokens": 500, "temperature": 0.1},
-            },
-            model_id,
-        )
-        try:
-            match = re.search(
-                pattern, response[0]["generated_text"], re.MULTILINE | re.DOTALL
-            )
-            if match:
-                response = match.group(1).strip()
-
-            response = eval(response)
-
-            st.success("Data Extracted Successfully!")
-            st.write(response)
-        except:
-            st.error("Unable to connect to model. Please try again later.")
-
-# st.success(f"Data Extracted!")
+llm = FakeLLM()
````
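Note: the `load_config_values` helper now imported from `src.utils` is not part of this commit, so its actual body is unknown. A plausible sketch, inferred purely from the inline YAML handling deleted above (the config filename is an assumption, and the key check mirrors the removed lines, including their error message):

```python
# Hypothetical sketch of the src/utils.py helper -- this commit only imports
# load_config_values; the body below is inferred from the deleted inline code
# and is not the repo's actual implementation.
import yaml

MODEL_CONFIG_PATH = "model_config.yaml"  # assumed filename, not shown in the diff
CONFIG_KEYS = ["system_message", "model_id", "template"]


def load_config_values(config_path: str = MODEL_CONFIG_PATH):
    """Read the model config YAML and return (system_message, model_id, template)."""
    with open(config_path, "r") as file:
        model_config = yaml.safe_load(file)

    # Reject unexpected keys, mirroring the validation removed from app_langchain.py
    for var in model_config.keys():
        if var not in CONFIG_KEYS:
            raise ValueError(f"`{var}` key missing from `{config_path}`")

    return (
        model_config["system_message"],
        model_config["model_id"],
        model_config["template"],
    )
```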
src/dev_llm.py (ADDED)

```diff
@@ -0,0 +1,23 @@
+from langchain_core.language_models.llms import LLM
+from langchain_core.callbacks.manager import CallbackManagerForLLMRun
+from typing import Any, List, Optional
+
+class FakeLLM(LLM):
+    """
+    An LLM class that returns nothing of value and is a temp class designed to work in Langchain.
+    """
+    def _call(
+        self,
+        prompt: str,
+        stop: Optional[List[str]] = None,
+        run_manager: Optional[CallbackManagerForLLMRun] = None,
+        **kwargs: Any,
+    ) -> str:
+        if stop is not None:
+            raise ValueError("stop kwargs are not permitted.")
+        return prompt
+
+    @property
+    def _llm_type(self) -> str:
+        """Get the type of language model used by this chat model. Used for logging purposes only."""
+        return "custom"
```
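Because `_call` returns the prompt verbatim, `FakeLLM` can stand in for a real model in the LangChain pipeline that `app_langchain.py` now wires up, letting prompt templating be tested without touching the HuggingFace Inference API. A minimal usage sketch (the template string here is a stand-in for the one normally loaded via `load_config_values`):

```python
from langchain_core.prompts import PromptTemplate

from src.dev_llm import FakeLLM

# Stand-in template; the app loads the real one from its config file
template = "<<SYS>>{system_message}<</SYS>>\n{user_message}"

prompt = PromptTemplate(
    template=template,
    input_variables=["system_message", "user_message"],
)

# LCEL composition: the formatted prompt flows straight through FakeLLM
chain = prompt | FakeLLM()

print(chain.invoke({
    "system_message": "You extract tabular data from documents.",
    "user_message": "Extract patient_name from the uploaded PDF text.",
}))
# Prints the fully formatted prompt, since FakeLLM echoes its input
```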