prithivMLmods committed · Commit 5a2b76f · verified · 1 Parent(s): e271766

Update app.py

Files changed (1):
  1. app.py +7 -19
app.py CHANGED
@@ -1,18 +1,8 @@
 import gradio as gr
 from huggingface_hub import InferenceClient
 import json
-import uuid
-from PIL import Image
 from bs4 import BeautifulSoup
 import requests
-import random
-from transformers import LlavaProcessor, LlavaForConditionalGeneration, TextIteratorStreamer
-from threading import Thread
-import re
-import time
-import torch
-import cv2
-from gradio_client import Client, file

 def extract_text_from_webpage(html_content):
     soup = BeautifulSoup(html_content, 'html.parser')
@@ -22,7 +12,6 @@ def extract_text_from_webpage(html_content):

 def search(query):
     term = query
-    start = 0
     all_results = []
     max_chars_per_page = 8000
     with requests.Session() as session:
@@ -70,8 +59,7 @@ def respond(message, history):
         func_caller.append({"role": "user", "content": f"{str(msg[0])}"})
         func_caller.append({"role": "assistant", "content": f"{str(msg[1])}"})

-    message_text = message["text"]
-    func_caller.append({"role": "user", "content": f'[SYSTEM]You are a helpful assistant. You have access to the following functions: \n {str(functions_metadata)}\n\nTo use these functions respond with:\n<functioncall> {{ "name": "function_name", "arguments": {{ "arg_1": "value_1", "arg_1": "value_1", ... }} }} </functioncall> [USER] {message_text}'})
+    func_caller.append({"role": "user", "content": f'[SYSTEM]You are a helpful assistant. You have access to the following functions: \n {str(functions_metadata)}\n\nTo use these functions respond with:\n<functioncall> {{ "name": "function_name", "arguments": {{ "arg_1": "value_1", "arg_1": "value_1", ... }} }} </functioncall> [USER] {message}'})

     response = client_gemma.chat_completion(func_caller, max_tokens=200)
     response = str(response)
@@ -97,7 +85,7 @@ def respond(message, history):
         for msg in history:
             messages += f"\nuser\n{str(msg[0])}"
             messages += f"\nassistant\n{str(msg[1])}"
-        messages+=f"\nuser\n{message_text}\nweb_result\n{web2}\nassistant\n"
+        messages+=f"\nuser\n{message}\nweb_result\n{web2}\nassistant\n"
         stream = client_mixtral.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True, details=True, return_full_text=False)
         output = ""
         for response in stream:
@@ -109,7 +97,7 @@ def respond(message, history):
         for msg in history:
             messages += f"\nuser\n{str(msg[0])}"
             messages += f"\nassistant\n{str(msg[1])}"
-        messages+=f"\nuser\n{message_text}\nassistant\n"
+        messages+=f"\nuser\n{message}\nassistant\n"
         stream = client_llama.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True, details=True, return_full_text=False)
         output = ""
         for response in stream:
@@ -121,7 +109,7 @@ def respond(message, history):
         for msg in history:
             messages += f"\nuser\n{str(msg[0])}"
             messages += f"\nassistant\n{str(msg[1])}"
-        messages+=f"\nuser\n{message_text}\nassistant\n"
+        messages+=f"\nuser\n{message}\nassistant\n"
         stream = client_llama.text_generation(messages, max_new_tokens=2000, do_sample=True, stream=True, details=True, return_full_text=False)
         output = ""
         for response in stream:
@@ -133,8 +121,8 @@ demo = gr.ChatInterface(
     fn=respond,
     chatbot=gr.Chatbot(show_copy_button=True, likeable=True, layout="panel"),
     description=" ",
-    textbox=gr.Textbox(),
-    multimodal=False,
+    textbox=gr.Textbox(), # Changed to Textbox
+    multimodal=False, # Disabled multimodal
     concurrency_limit=200,
 )
-demo.launch()
+demo.launch(share=True)
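
Why message["text"] became message: with multimodal=False, gr.ChatInterface delivers the user turn to respond as a plain string, whereas in multimodal mode it arrives as a dict with "text" and "files" keys, so the old dict lookup would now fail on a string input.

The prompt in the diff asks the model to wrap a JSON function call in <functioncall> tags, which the rest of the app (not shown in this diff) has to extract and parse. A minimal sketch of such a parser, assuming only the format the prompt requests; parse_function_call is an illustrative helper, not code from this commit:

import json
import re

def parse_function_call(response_text):
    # Illustrative helper (not part of this commit): pull the JSON payload
    # out of the <functioncall> ... </functioncall> wrapper the prompt asks for.
    match = re.search(r"<functioncall>\s*(\{.*\})\s*</functioncall>", response_text, re.DOTALL)
    if not match:
        return None  # model replied in plain text; no function call to run
    try:
        call = json.loads(match.group(1))
    except json.JSONDecodeError:
        return None  # malformed JSON; safest to fall back to a plain reply
    return call.get("name"), call.get("arguments")

# Example:
# parse_function_call('<functioncall> {"name": "web_search", "arguments": {"query": "weather"}} </functioncall>')
# -> ("web_search", {"query": "weather"})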