initial commit for the llm
- app.py +211 -0
- requirements.txt +4 -0
- utils.py +160 -0
app.py
ADDED
@@ -0,0 +1,211 @@
import gradio as gr
import os
import time
from transformers import AutoModelForCausalLM, AutoTokenizer, TextIteratorStreamer, BitsAndBytesConfig
import torch
from threading import Thread
import logging
import spaces
from functools import lru_cache

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# Set an environment variable
HF_TOKEN = os.environ.get("HF_TOKEN", None)

DESCRIPTION = '''
<div>
<h1 style="text-align: center;">ContenteaseAI custom trained model</h1>
</div>
'''

LICENSE = """
<p/>
---
For more information, visit our [website](https://contentease.ai).
"""

PLACEHOLDER = """
<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
   <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">ContenteaseAI Custom AI trained model</h1>
   <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Enter the text extracted from the PDF:</p>
</div>
"""

css = """
h1 {
  text-align: center;
  display: block;
}
"""

# Load the tokenizer and model with 4-bit quantization
model_id = "meta-llama/Meta-Llama-3-8B-Instruct"
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype=torch.bfloat16
)

@lru_cache(maxsize=1)
def load_model_and_tokenizer():
    try:
        start_time = time.time()
        logger.info("Loading tokenizer...")
        tokenizer = AutoTokenizer.from_pretrained(model_id)
        logger.info("Loading model...")
        model = AutoModelForCausalLM.from_pretrained(
            model_id,
            device_map="auto",
            quantization_config=bnb_config,
            torch_dtype=torch.bfloat16
        )
        model.generation_config.pad_token_id = tokenizer.pad_token_id
        end_time = time.time()
        logger.info(f"Model and tokenizer loaded successfully in {end_time - start_time} seconds.")
        return model, tokenizer
    except Exception as e:
        logger.error(f"Error loading model or tokenizer: {e}")
        raise

try:
    model, tokenizer = load_model_and_tokenizer()
except Exception as e:
    logger.error(f"Failed to load model and tokenizer: {e}")
    raise

terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>")
]

SYS_PROMPT = """
Extract all relevant keywords and add quantity from the following text and format the result in nested JSON, ignoring personal details and focusing only on the scope of work as shown in the example:
Good JSON example: {'lobby': {'frcm': {'replace': {'carpet': 1, 'carpet_pad': 1, 'base': 1, 'window_treatments': 1, 'artwork_and_decorative_accessories': 1, 'portable_lighting': 1, 'upholstered_furniture_and_decorative_pillows': 1, 'millwork': 1} } } }
Bad JSON example: {'lobby': { 'frcm': { 'replace': [ 'carpet', 'carpet_pad', 'base', 'window_treatments', 'artwork_and_decorative_accessories', 'portable_lighting', 'upholstered_furniture_and_decorative_pillows', 'millwork'] } } }
Make sure to fetch details from the provided text and ignore unnecessary information. The response should be in JSON format only, without any additional comments.
"""

def chunk_text(text, chunk_size=5000):
    """
    Splits the input text into chunks of specified size.

    Args:
        text (str): The input text to be chunked.
        chunk_size (int): The size of each chunk in words.

    Returns:
        list: A list of text chunks.
    """
    words = text.split()
    chunks = [' '.join(words[i:i + chunk_size]) for i in range(0, len(words), chunk_size)]
    return chunks

def combine_responses(responses):
    """
    Combines the responses from all chunks into a final output string.

    Args:
        responses (list): A list of responses from each chunk.

    Returns:
        str: The combined output string.
    """
    combined_output = " ".join(responses)
    return combined_output

def generate_response_for_chunk(chunk, history, temperature, max_new_tokens):
    start_time = time.time()

    conversation = [{"role": "system", "content": SYS_PROMPT}]
    for user, assistant in history:
        conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
    conversation.append({"role": "user", "content": chunk})

    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt").to(model.device)

    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)

    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
        eos_token_id=terminators,
        pad_token_id=tokenizer.eos_token_id
    )
    if temperature == 0:
        generate_kwargs['do_sample'] = False

    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    outputs = []
    for text in streamer:
        outputs.append(text)

    end_time = time.time()
    logger.info(f"Time taken for generating response for a chunk: {end_time - start_time} seconds")

    return "".join(outputs)

@spaces.GPU(duration=110)
def chat_llama3_8b(message: str, history: list, temperature: float, max_new_tokens: int):
    """
    Generate a streaming response using the llama3-8b model with chunking.

    Args:
        message (str): The input message.
        history (list): The conversation history used by ChatInterface.
        temperature (float): The temperature for generating the response.
        max_new_tokens (int): The maximum number of new tokens to generate.

    Returns:
        str: The generated response.
    """
    try:
        start_time = time.time()

        chunks = chunk_text(message)
        responses = []
        for chunk in chunks:
            response = generate_response_for_chunk(chunk, history, temperature, max_new_tokens)
            responses.append(response)
        final_output = combine_responses(responses)

        end_time = time.time()
        logger.info(f"Total time taken for generating response: {end_time - start_time} seconds")

        yield final_output
    except Exception as e:
        logger.error(f"Error generating response: {e}")
        yield "An error occurred while generating the response. Please try again."

# Gradio block
chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface')

with gr.Blocks(fill_height=True, css=css) as demo:
    gr.Markdown(DESCRIPTION)

    gr.ChatInterface(
        fn=chat_llama3_8b,
        chatbot=chatbot,
        fill_height=True,
        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
        additional_inputs=[
            gr.Slider(minimum=0, maximum=1, step=0.1, value=0.95, label="Temperature", render=False),
            gr.Slider(minimum=128, maximum=2000, step=1, value=700, label="Max new tokens", render=False),
        ]
    )

    gr.Markdown(LICENSE)

if __name__ == "__main__":
    try:
        demo.launch(show_error=True)
    except Exception as e:
        logger.error(f"Error launching Gradio demo: {e}")
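
Note that chunk_text above splits on whitespace, so chunk_size counts words rather than tokens; a 5,000-word chunk will usually tokenize to noticeably more than 5,000 tokens, which is worth keeping in mind against the model's context window. A quick standalone sketch of the splitting behavior (dummy input, for illustration only):

# Standalone sketch of the word-based chunking used in app.py.
def chunk_text(text, chunk_size=5000):
    words = text.split()
    return [' '.join(words[i:i + chunk_size]) for i in range(0, len(words), chunk_size)]

sample = "word " * 12000              # 12,000 dummy words
chunks = chunk_text(sample)
print(len(chunks))                    # 3 chunks: 5000 + 5000 + 2000 words
print(len(chunks[-1].split()))        # 2000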
requirements.txt
ADDED
@@ -0,0 +1,4 @@
accelerate
transformers
SentencePiece
bitsandbytes
utils.py
ADDED
@@ -0,0 +1,160 @@
import gradio as gr
import os
import torch
from transformers import AutoConfig, AutoTokenizer, AutoModelForCausalLM, TextIteratorStreamer
from threading import Thread
from accelerate import init_empty_weights, infer_auto_device_map, disk_offload

# Set environment variables
HF_TOKEN = os.getenv("HF_TOKEN")

DESCRIPTION = '''
<div>
<h1 style="text-align: center;">ContenteaseAI custom trained model</h1>
</div>
'''

LICENSE = """
<p/>

---
For more information, visit our [website](https://contentease.ai).
"""

PLACEHOLDER = """
<div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
   <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">ContenteaseAI Custom AI trained model</h1>
   <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Enter the text extracted from the PDF:</p>
</div>
"""

css = """
h1 {
  text-align: center;
  display: block;
}
"""

def initialize_model(model_name, max_memory=None):
    device = torch.device('cpu')

    # Load model configuration
    config = AutoConfig.from_pretrained(model_name)

    with init_empty_weights():
        # Initialize model with empty weights
        model = AutoModelForCausalLM.from_config(config)

    # Create device map based on memory constraints
    device_map = infer_auto_device_map(
        model, max_memory=max_memory,
        no_split_module_classes=["LlamaDecoderLayer"],  # keep each Llama decoder block on one device
        dtype="float16"
    )

    # Determine if offloading is needed
    needs_offloading = any(placement == 'disk' for placement in device_map.values())

    if needs_offloading:
        # Load model for offloading
        model = AutoModelForCausalLM.from_pretrained(
            model_name, device_map=device_map, offload_folder="offload",
            offload_state_dict=True, torch_dtype=torch.float16
        )
        offload_directory = "offload/"
        # Offload model to disk
        disk_offload(model=model, offload_dir=offload_directory)
    else:
        # Load model normally to specified device
        model = AutoModelForCausalLM.from_pretrained(
            model_name, torch_dtype=torch.float16
        )
        model.to(device)

    return model

try:
    # Initialize the model and tokenizer
    model_name = "meta-llama/Meta-Llama-3-8B-Instruct"
    model = initialize_model(model_name, max_memory={"cpu": "30GiB"})  # assumed CPU memory budget
    tokenizer = AutoTokenizer.from_pretrained(model_name, use_auth_token=HF_TOKEN)
except Exception as e:
    print(f"Error initializing model: {e}")
    exit(1)

terminators = [
    tokenizer.eos_token_id,
    tokenizer.convert_tokens_to_ids("<|eot_id|>")
]

def chat_llama3_8b(message: str, history: list, temperature: float, max_new_tokens: int) -> str:
    """
    Generate a streaming response using the llama3-8b model.

    Args:
        message (str): The input message.
        history (list): The conversation history used by ChatInterface.
        temperature (float): The temperature for generating the response.
        max_new_tokens (int): The maximum number of new tokens to generate.

    Returns:
        str: The generated response.
    """
    conversation = []
    message += " Extract all relevant keywords and add quantity from the following text and format the result in nested JSON:"
    for user, assistant in history:
        conversation.extend([{"role": "user", "content": user}, {"role": "assistant", "content": assistant}])
    conversation.append({"role": "user", "content": message})

    input_ids = tokenizer.apply_chat_template(conversation, return_tensors="pt").to(model.device)

    streamer = TextIteratorStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)

    generate_kwargs = dict(
        input_ids=input_ids,
        streamer=streamer,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
        eos_token_id=terminators,
    )
    if temperature == 0:
        generate_kwargs['do_sample'] = False

    t = Thread(target=model.generate, kwargs=generate_kwargs)
    t.start()

    outputs = []
    for text in streamer:
        outputs.append(text)
        yield "".join(outputs)

# Gradio block
chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface')

with gr.Blocks(fill_height=True, css=css) as demo:
    gr.Markdown(DESCRIPTION)
    gr.ChatInterface(
        fn=chat_llama3_8b,
        chatbot=chatbot,
        fill_height=True,
        additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
        additional_inputs=[
            gr.Slider(
                minimum=0,
                maximum=1,
                step=0.1,
                value=0.95,
                label="Temperature",
                render=False
            ),
            gr.Slider(
                minimum=128,
                maximum=9012,
                step=1,
                value=512,
                label="Max new tokens",
                render=False
            ),
        ]
    )
    gr.Markdown(LICENSE)

if __name__ == "__main__":
    demo.launch(server_port=8000, share=True)
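
The empty-weights pass in initialize_model is what drives the offload decision: infer_auto_device_map assigns every module a placement under the max_memory budget, and any module that lands on "disk" triggers the disk_offload branch. A minimal sketch of just that decision, assuming the gated Llama-3 repo is accessible with your HF token and using an arbitrary 8GiB CPU budget chosen for illustration:

# Sketch of the accelerate device-map decision used in initialize_model.
# The "8GiB" budget is an assumed example value, not a recommendation.
from accelerate import init_empty_weights, infer_auto_device_map
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("meta-llama/Meta-Llama-3-8B-Instruct")
with init_empty_weights():
    meta_model = AutoModelForCausalLM.from_config(config)  # no real weights allocated

device_map = infer_auto_device_map(
    meta_model,
    max_memory={"cpu": "8GiB"},                       # assumed budget
    no_split_module_classes=["LlamaDecoderLayer"],    # keep each decoder block whole
    dtype="float16",
)
# Any module mapped to "disk" is what flips needs_offloading to True above.
print(any(placement == "disk" for placement in device_map.values()))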