awacke1 committed
Commit 332d4d1
1 Parent(s): 0a5fc1f

Update app.py

Files changed (1)
  1. app.py +45 -180
app.py CHANGED
@@ -1,19 +1,51 @@
+ # Imports
+ import base64
+ import glob
+ import json
+ import math
+ import mistune
+ import openai
+ import os
+ import pytz
+ import re
  import requests
  import streamlit as st
- import os
+ import textract
+ import time
+ import zipfile
+ from audio_recorder_streamlit import audio_recorder
+ from bs4 import BeautifulSoup
+ from collections import deque
+ from datetime import datetime
+ from dotenv import load_dotenv
  from huggingface_hub import InferenceClient
+ from io import BytesIO
+ from langchain.chat_models import ChatOpenAI
+ from langchain.chains import ConversationalRetrievalChain
+ from langchain.embeddings import OpenAIEmbeddings
+ from langchain.memory import ConversationBufferMemory
+ from langchain.text_splitter import CharacterTextSplitter
+ from langchain.vectorstores import FAISS
+ from openai import ChatCompletion
+ from PyPDF2 import PdfReader
+ from templates import bot_template, css, user_template
+ from xml.etree import ElementTree as ET

- API_URL = 'https://qe55p8afio98s0u3.us-east-1.aws.endpoints.huggingface.cloud'
+ # Constants
+ API_URL = 'https://qe55p8afio98s0u3.us-east-1.aws.endpoints.huggingface.cloud' # Dr Llama
  API_KEY = os.getenv('API_KEY')
-
  headers = {
      "Authorization": f"Bearer {API_KEY}",
      "Content-Type": "application/json"
  }
-
- # Prompt Set of Examples:
  prompt = f"Write instructions to teach anyone to write a discharge plan. List the entities, features and relationships to CCDA and FHIR objects in boldface."
+ # page config and sidebar declares up front allow all other functions to see global class variables
+ st.set_page_config(page_title="GPT Streamlit Document Reasoner", layout="wide")
+
+ # UI Controls
+ should_save = st.sidebar.checkbox("💾 Save", value=True)

+ # Functions
  def StreamLLMChatResponse(prompt):
      endpoint_url = API_URL
      hf_token = API_KEY
@@ -39,7 +71,6 @@ def StreamLLMChatResponse(prompt):
          collected_chunks.append(r.token.text)
          chunk_message = r.token.text
          collected_messages.append(chunk_message)
-
          try:
              report.append(r.token.text)
              if len(r.token.text) > 0:
@@ -56,52 +87,6 @@ def query(payload):
  def get_output(prompt):
      return query({"inputs": prompt})

-
-
-
-
- import streamlit as st
- import openai
- import os
- import base64
- import glob
- import json
- import mistune
- import pytz
- import math
- import requests
- import time
- import re
- import textract
- import zipfile # New import for zipping files
-
-
- from datetime import datetime
- from openai import ChatCompletion
- from xml.etree import ElementTree as ET
- from bs4 import BeautifulSoup
- from collections import deque
- from audio_recorder_streamlit import audio_recorder
- from dotenv import load_dotenv
- from PyPDF2 import PdfReader
- from langchain.text_splitter import CharacterTextSplitter
- from langchain.embeddings import OpenAIEmbeddings
- from langchain.vectorstores import FAISS
- from langchain.chat_models import ChatOpenAI
- from langchain.memory import ConversationBufferMemory
- from langchain.chains import ConversationalRetrievalChain
- from templates import css, bot_template, user_template
-
- # page config and sidebar declares up front allow all other functions to see global class variables
- st.set_page_config(page_title="GPT Streamlit Document Reasoner", layout="wide")
- should_save = st.sidebar.checkbox("💾 Save", value=True)
-
- def generate_filename_old(prompt, file_type):
-     central = pytz.timezone('US/Central')
-     safe_date_time = datetime.now(central).strftime("%m%d_%H%M") # Date and time DD-HHMM
-     safe_prompt = "".join(x for x in prompt if x.isalnum())[:90] # Limit file name size and trim whitespace
-     return f"{safe_date_time}_{safe_prompt}.{file_type}" # Return a safe file name
-
  def generate_filename(prompt, file_type):
      central = pytz.timezone('US/Central')
      safe_date_time = datetime.now(central).strftime("%m%d_%H%M")
@@ -121,10 +106,7 @@ def transcribe_audio(openai_key, file_path, model):
      st.write(response.json())
      chatResponse = chat_with_model(response.json().get('text'), '') # *************************************
      transcript = response.json().get('text')
-     #st.write('Responses:')
-     #st.write(chatResponse)
      filename = generate_filename(transcript, 'txt')
-     #create_file(filename, transcript, chatResponse)
      response = chatResponse
      user_prompt = transcript
      create_file(filename, user_prompt, response, should_save)
@@ -147,47 +129,21 @@ def save_and_play_audio(audio_recorder):
  def create_file(filename, prompt, response, should_save=True):
      if not should_save:
          return
-
-     # Step 2: Extract base filename without extension
      base_filename, ext = os.path.splitext(filename)
-
-     # Step 3: Check if the response contains Python code
      has_python_code = bool(re.search(r"```python([\s\S]*?)```", response))
-
-     # Step 4: Write files based on type
      if ext in ['.txt', '.htm', '.md']:
-         # Create Prompt file
          with open(f"{base_filename}-Prompt.txt", 'w') as file:
              file.write(prompt)
-
-         # Create Response file
          with open(f"{base_filename}-Response.md", 'w') as file:
              file.write(response)
-
-         # Create Code file if Python code is present
          if has_python_code:
-             # Extract Python code from the response
              python_code = re.findall(r"```python([\s\S]*?)```", response)[0].strip()
-
              with open(f"{base_filename}-Code.py", 'w') as file:
                  file.write(python_code)
-
-
- def create_file_old(filename, prompt, response, should_save=True):
-     if not should_save:
-         return
-     if filename.endswith(".txt"):
-         with open(filename, 'w') as file:
-             file.write(f"{prompt}\n{response}")
-     elif filename.endswith(".htm"):
-         with open(filename, 'w') as file:
-             file.write(f"{prompt} {response}")
-     elif filename.endswith(".md"):
-         with open(filename, 'w') as file:
-             file.write(f"{prompt}\n\n{response}")

  def truncate_document(document, length):
      return document[:length]
+
  def divide_document(document, max_length):
      return [document[i:i+max_length] for i in range(0, len(document), max_length)]

@@ -252,35 +208,23 @@ def chat_with_model(prompt, document_section, model_choice='gpt-3.5-turbo'):
      conversation.append({'role': 'user', 'content': prompt})
      if len(document_section)>0:
          conversation.append({'role': 'assistant', 'content': document_section})
-
      start_time = time.time()
      report = []
      res_box = st.empty()
      collected_chunks = []
      collected_messages = []
-
-     for chunk in openai.ChatCompletion.create(
-         model='gpt-3.5-turbo',
-         messages=conversation,
-         temperature=0.5,
-         stream=True
-     ):
-
-         collected_chunks.append(chunk) # save the event response
-         chunk_message = chunk['choices'][0]['delta'] # extract the message
-         collected_messages.append(chunk_message) # save the message
-
+     for chunk in openai.ChatCompletion.create(model='gpt-3.5-turbo', messages=conversation, temperature=0.5, stream=True):
+         collected_chunks.append(chunk)
+         chunk_message = chunk['choices'][0]['delta']
+         collected_messages.append(chunk_message)
          content=chunk["choices"][0].get("delta",{}).get("content")
-
          try:
              report.append(content)
              if len(content) > 0:
                  result = "".join(report).strip()
-                 #result = result.replace("\n", "")
                  res_box.markdown(f'*{result}*')
          except:
              st.write(' ')
-
      full_reply_content = ''.join([m.get('content', '') for m in collected_messages])
      st.write("Elapsed time:")
      st.write(time.time() - start_time)
@@ -295,7 +239,6 @@ def chat_with_file_contents(prompt, file_content, model_choice='gpt-3.5-turbo'):
      return response['choices'][0]['message']['content']

  def extract_mime_type(file):
-     # Check if the input is a string
      if isinstance(file, str):
          pattern = r"type='(.*?)'"
          match = re.search(pattern, file)
@@ -303,15 +246,11 @@ def extract_mime_type(file):
              return match.group(1)
          else:
              raise ValueError(f"Unable to extract MIME type from {file}")
-     # If it's not a string, assume it's a streamlit.UploadedFile object
      elif isinstance(file, streamlit.UploadedFile):
          return file.type
      else:
          raise TypeError("Input should be a string or a streamlit.UploadedFile object")

- from io import BytesIO
- import re
-
  def extract_file_extension(file):
      # get the file name directly from the UploadedFile object
      file_name = file.name
@@ -326,10 +265,7 @@ def pdf2txt(docs):
      text = ""
      for file in docs:
          file_extension = extract_file_extension(file)
-         # print the file extension
          st.write(f"File type extension: {file_extension}")
-
-         # read the file according to its extension
          try:
              if file_extension.lower() in ['py', 'txt', 'html', 'htm', 'xml', 'json']:
                  text += file.getvalue().decode('utf-8')
@@ -340,20 +276,6 @@ def pdf2txt(docs):
                  text += pdf.pages[page].extract_text() # new PyPDF2 syntax
          except Exception as e:
              st.write(f"Error processing file {file.name}: {e}")
-
-     return text
-
- def pdf2txt_old(pdf_docs):
-     st.write(pdf_docs)
-     for file in pdf_docs:
-         mime_type = extract_mime_type(file)
-         st.write(f"MIME type of file: {mime_type}")
-
-     text = ""
-     for pdf in pdf_docs:
-         pdf_reader = PdfReader(pdf)
-         for page in pdf_reader.pages:
-             text += page.extract_text()
      return text

  def txt2chunks(text):
@@ -376,13 +298,10 @@ def process_user_input(user_question):
      for i, message in enumerate(st.session_state.chat_history):
          template = user_template if i % 2 == 0 else bot_template
          st.write(template.replace("{{MSG}}", message.content), unsafe_allow_html=True)
-         # Save file output from PDF query results
          filename = generate_filename(user_question, 'txt')
-         #create_file(filename, user_question, message.content)
          response = message.content
          user_prompt = user_question
          create_file(filename, user_prompt, response, should_save)
-         #st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)

  def divide_prompt(prompt, max_length):
      words = prompt.split()
@@ -391,78 +310,53 @@ def divide_prompt(prompt, max_length):
      current_length = 0
      for word in words:
          if len(word) + current_length <= max_length:
-             current_length += len(word) + 1 # Adding 1 to account for spaces
+             current_length += len(word) + 1
              current_chunk.append(word)
          else:
              chunks.append(' '.join(current_chunk))
              current_chunk = [word]
              current_length = len(word)
-     chunks.append(' '.join(current_chunk)) # Append the final chunk
+     chunks.append(' '.join(current_chunk))
      return chunks

  def create_zip_of_files(files):
-     """
-     Create a zip file from a list of files.
-     """
      zip_name = "all_files.zip"
      with zipfile.ZipFile(zip_name, 'w') as zipf:
          for file in files:
              zipf.write(file)
      return zip_name

-
  def get_zip_download_link(zip_file):
-     """
-     Generate a link to download the zip file.
-     """
      with open(zip_file, 'rb') as f:
          data = f.read()
      b64 = base64.b64encode(data).decode()
      href = f'<a href="data:application/zip;base64,{b64}" download="{zip_file}">Download All</a>'
      return href

-
-
  def main():
      st.title("Medical Llama Test Bench with Inference Endpoints Llama 7B")
      prompt = f"Write instructions to teach anyone to write a discharge plan. List the entities, features and relationships to CCDA and FHIR objects in boldface."
      example_input = st.text_input("Enter your example text:", value=prompt)
-
      if st.button("Run Prompt With Dr Llama"):
          try:
              StreamLLMChatResponse(example_input)
          except:
              st.write('Dr. Llama is asleep. Starting up now on A10 - please give 5 minutes then retry as KEDA scales up from zero to activate running container(s).')
-
-     # clip ---
-
      openai.api_key = os.getenv('OPENAI_KEY')
-
-     # File type for output, model choice
      menu = ["txt", "htm", "xlsx", "csv", "md", "py"]
      choice = st.sidebar.selectbox("Output File Type:", menu)
      model_choice = st.sidebar.radio("Select Model:", ('gpt-3.5-turbo', 'gpt-3.5-turbo-0301'))
-
-     # Audio, transcribe, GPT:
      filename = save_and_play_audio(audio_recorder)
      if filename is not None:
          transcription = transcribe_audio(openai.api_key, filename, "whisper-1")
          st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
          filename = None
-
-     # prompt interfaces
      user_prompt = st.text_area("Enter prompts, instructions & questions:", '', height=100)
-
-     # file section interface for prompts against large documents as context
      collength, colupload = st.columns([2,3]) # adjust the ratio as needed
      with collength:
          max_length = st.slider("File section length for large files", min_value=1000, max_value=128000, value=12000, step=1000)
      with colupload:
          uploaded_file = st.file_uploader("Add a file for context:", type=["pdf", "xml", "json", "xlsx", "csv", "html", "htm", "md", "txt"])
-
-
-     # Document section chat
-
      document_sections = deque()
      document_responses = {}
      if uploaded_file is not None:
@@ -480,54 +374,36 @@ def main():
          else:
              if st.button(f"Chat about Section {i+1}"):
                  st.write('Reasoning with your inputs...')
-                 response = chat_with_model(user_prompt, section, model_choice) # *************************************
+                 response = chat_with_model(user_prompt, section, model_choice)
                  st.write('Response:')
                  st.write(response)
                  document_responses[i] = response
                  filename = generate_filename(f"{user_prompt}_section_{i+1}", choice)
                  create_file(filename, user_prompt, response, should_save)
                  st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
-
      if st.button('💬 Chat'):
          st.write('Reasoning with your inputs...')
-
-         #response = chat_with_model(user_prompt, ''.join(list(document_sections,)), model_choice) # *************************************
-
-         # Divide the user_prompt into smaller sections
          user_prompt_sections = divide_prompt(user_prompt, max_length)
          full_response = ''
          for prompt_section in user_prompt_sections:
-             # Process each section with the model
              response = chat_with_model(prompt_section, ''.join(list(document_sections)), model_choice)
              full_response += response + '\n' # Combine the responses
-
-         #st.write('Response:')
-         #st.write(full_response)
-
          response = full_response
          st.write('Response:')
          st.write(response)
-
          filename = generate_filename(user_prompt, choice)
          create_file(filename, user_prompt, response, should_save)
          st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
-
      all_files = glob.glob("*.*")
      all_files = [file for file in all_files if len(os.path.splitext(file)[0]) >= 20] # exclude files with short names
      all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True) # sort by file type and file name in descending order
-
-     # Added "Delete All" button
      if st.sidebar.button("🗑 Delete All"):
          for file in all_files:
              os.remove(file)
          st.experimental_rerun()
-
-     # Added "Download All" button
      if st.sidebar.button("⬇️ Download All"):
          zip_file = create_zip_of_files(all_files)
          st.sidebar.markdown(get_zip_download_link(zip_file), unsafe_allow_html=True)
-
-     # Sidebar of Files Saving History and surfacing files as context of prompts and responses
      file_contents=''
      next_action=''
      for file in all_files:
@@ -553,7 +429,6 @@ def main():
          if st.button("🗑", key="delete_"+file):
              os.remove(file)
              st.experimental_rerun()
-
      if len(file_contents) > 0:
          if next_action=='open':
              file_content_area = st.text_area("File Contents:", file_contents, height=500)
@@ -565,19 +440,14 @@ def main():
              response = chat_with_model(user_prompt, file_contents, model_choice)
              filename = generate_filename(file_contents, choice)
              create_file(filename, user_prompt, response, should_save)
-
              st.experimental_rerun()
-             #st.sidebar.markdown(get_table_download_link(filename), unsafe_allow_html=True)
-

  load_dotenv()
  st.write(css, unsafe_allow_html=True)
-
  st.header("Chat with documents :books:")
  user_question = st.text_input("Ask a question about your documents:")
  if user_question:
      process_user_input(user_question)
-
  with st.sidebar:
      st.subheader("Your documents")
      docs = st.file_uploader("import documents", accept_multiple_files=True)
@@ -591,11 +461,6 @@ with st.sidebar:
          st.markdown('# AI Search Index of Length:' + length + ' Created.') # add timing
          filename = generate_filename(raw, 'txt')
          create_file(filename, raw, '', should_save)
-         #create_file(filename, raw, '')
-
-
-
-

  if __name__ == "__main__":
      main()
 