awacke1 committed on
Commit
ee938cd
1 Parent(s): 47fdd3a

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -3
app.py CHANGED
@@ -191,6 +191,7 @@ def StreamLLMChatResponse(prompt):
191
  endpoint_url = API_URL
192
  hf_token = API_KEY
193
  client = InferenceClient(endpoint_url, token=hf_token)
 
194
  gen_kwargs = dict(
195
  max_new_tokens=512,
196
  top_k=30,
@@ -251,7 +252,8 @@ def transcribe_audio(openai_key, file_path, model):
251
  "Authorization": f"Bearer {openai_key}",
252
  }
253
  with open(file_path, 'rb') as f:
254
- data = {'file': f}
 
255
  response = requests.post(OPENAI_API_URL, headers=headers, files=data, data={'model': model})
256
  if response.status_code == 200:
257
  st.write(response.json())
@@ -377,7 +379,8 @@ def chat_with_model(prompt, document_section, model_choice='gpt-3.5-turbo'):
377
  report = []
378
  res_box = st.empty()
379
  collected_chunks = []
380
- collected_messages = []
 
381
  for chunk in openai.ChatCompletion.create(model='gpt-3.5-turbo', messages=conversation, temperature=0.5, stream=True):
382
  collected_chunks.append(chunk)
383
  chunk_message = chunk['choices'][0]['delta']
@@ -451,6 +454,7 @@ def txt2chunks(text):
451
  # Vector Store using FAISS
452
  @st.cache_resource
453
  def vector_store(text_chunks):
 
454
  embeddings = OpenAIEmbeddings(openai_api_key=key)
455
  return FAISS.from_texts(texts=text_chunks, embedding=embeddings)
456
 
@@ -528,7 +532,8 @@ headers = {
528
  #@st.cache_resource
529
  def query(filename):
530
  with open(filename, "rb") as f:
531
- data = f.read()
 
532
  response = requests.post(API_URL_IE, headers=headers, data=data)
533
  return response.json()
534
 
 
191
  endpoint_url = API_URL
192
  hf_token = API_KEY
193
  client = InferenceClient(endpoint_url, token=hf_token)
194
+ st.write('Opened HF hub Inference Client for endpoint URL: ' + endpoint_url)
195
  gen_kwargs = dict(
196
  max_new_tokens=512,
197
  top_k=30,
 
252
  "Authorization": f"Bearer {openai_key}",
253
  }
254
  with open(file_path, 'rb') as f:
255
+ data = {'file': f}
256
+ st.write('Transcribe Audio is Posting request to ' + OPENAI_API_URL)
257
  response = requests.post(OPENAI_API_URL, headers=headers, files=data, data={'model': model})
258
  if response.status_code == 200:
259
  st.write(response.json())
 
379
  report = []
380
  res_box = st.empty()
381
  collected_chunks = []
382
+ collected_messages = []
383
+ st.write('Running prompt with ' + model)
384
  for chunk in openai.ChatCompletion.create(model='gpt-3.5-turbo', messages=conversation, temperature=0.5, stream=True):
385
  collected_chunks.append(chunk)
386
  chunk_message = chunk['choices'][0]['delta']
 
454
  # Vector Store using FAISS
455
  @st.cache_resource
456
  def vector_store(text_chunks):
457
+ st.write('Retrieving OpenAI embeddings')
458
  embeddings = OpenAIEmbeddings(openai_api_key=key)
459
  return FAISS.from_texts(texts=text_chunks, embedding=embeddings)
460
 
 
532
  #@st.cache_resource
533
  def query(filename):
534
  with open(filename, "rb") as f:
535
+ data = f.read()
536
+ st.write('Posting request to model ' + API_URL_IE)
537
  response = requests.post(API_URL_IE, headers=headers, data=data)
538
  return response.json()
539