File size: 9,901 Bytes
3bddc3f bc91045 3bddc3f 4b286f3 94421aa 3bddc3f bc91045 3bddc3f 6acf91d 1ba22b4 3bddc3f 1ba22b4 4b286f3 3bddc3f bc91045 3bddc3f bc91045 3bddc3f 1ba22b4 6acf91d aa07a0a 6acf91d 4b286f3 bc91045 1ba22b4 bc91045 1ba22b4 3bddc3f bc91045 1ba22b4 3bddc3f bc91045 4b286f3 bc91045 3bddc3f 45e1616 bc91045 1ba22b4 6acf91d 3bddc3f bc91045 39cf431 027e874 94421aa 027e874 bc91045 027e874 3bddc3f 82502f1 149da6f 82502f1 149da6f 94421aa 149da6f 82502f1 149da6f 82502f1 149da6f 94421aa 82502f1 94421aa 1ba22b4 82502f1 149da6f 82502f1 149da6f 82502f1 149da6f 82502f1 149da6f 82502f1 149da6f 94421aa 149da6f 82502f1 149da6f 82502f1 149da6f 94421aa 82502f1 94421aa 149da6f 82502f1 1ba22b4 149da6f 82502f1 149da6f 82502f1 149da6f 82502f1 1ba22b4 82502f1 027e874 bc91045 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 |
import logging
import os
import re
import warnings
from pathlib import Path
import gradio as gr
import requests
import torch
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor, pipeline, Pipeline
from templates import starting_app_code, update_iframe_js, copy_snippet_js, download_code_js, load_js, DemoType
# Filter the UserWarning raised by the audio component.
warnings.filterwarnings("ignore", message='Trying to convert audio automatically from int32 to 16-bit int format')
logging.basicConfig(
    level=logging.INFO,  # Set the logging level to INFO or any other desired level
    format="%(asctime)s - %(message)s",  # Define the log message format
    datefmt="%Y-%m-%d %H:%M:%S",  # Define the timestamp format
)
logger = logging.getLogger("my_logger")
# A Hugging Face token is mandatory: every chat request goes to the hosted
# zephyr-7b-beta inference endpoint below. Fail fast at import time if absent.
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    raise Exception("HF_TOKEN environment variable is required to call remote API.")
API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"
headers = {"Authorization": f"Bearer {HF_TOKEN}"}
def init_speech_to_text_model() -> Pipeline:
    """Build the Whisper speech-recognition pipeline used for voice requests.

    Loads distil-whisper/distil-medium.en once, placing it on GPU with fp16
    when CUDA is available and falling back to CPU/fp32 otherwise.
    """
    use_cuda = torch.cuda.is_available()
    device = "cuda:0" if use_cuda else "cpu"
    torch_dtype = torch.float16 if use_cuda else torch.float32
    model_id = "distil-whisper/distil-medium.en"

    # low_cpu_mem_usage + safetensors keep the load footprint small.
    model = AutoModelForSpeechSeq2Seq.from_pretrained(
        model_id,
        torch_dtype=torch_dtype,
        low_cpu_mem_usage=True,
        use_safetensors=True,
    ).to(device)
    processor = AutoProcessor.from_pretrained(model_id)

    return pipeline(
        "automatic-speech-recognition",
        model=model,
        tokenizer=processor.tokenizer,
        feature_extractor=processor.feature_extractor,
        max_new_tokens=128,
        torch_dtype=torch_dtype,
        device=device,
    )
# Loaded once at import time and reused for every transcription request.
whisper_pipe = init_speech_to_text_model()
# Captures the contents of the first ```python ... ``` fenced block in a reply.
code_pattern = re.compile(r'```python\n(.*?)```', re.DOTALL)
def query(payload: dict):
    """Send `payload` to the hosted zephyr-7b-beta endpoint and return the parsed JSON reply."""
    return requests.post(API_URL, headers=headers, json=payload).json()
def generate_text(code: str, prompt: str) -> (str, str, str):
    """Ask the remote LLM to update `code` according to the user's `prompt`.

    Returns (assistant_reply, updated_code, None); the trailing None clears
    the audio input component. If the reply contains no fenced ```python```
    block, the original code is returned unchanged so the app is not wiped.
    Raises gr.Warning when the inference API reports an error.
    """
    logger.info(f"Calling API with prompt:\n{prompt}")
    prompt = f"```python\n{code}```\nGiven the code above return only updated code for the following request:\n{prompt}\n<|assistant|>"
    params = {"max_new_tokens": 512}
    output = query({"inputs": prompt, "parameters": params})
    if 'error' in output:
        logger.warning(f'Language model call failed: {output["error"]}')
        # NOTE(review): gr.Warning is normally called (not raised) to show a
        # toast; raising still aborts the event — kept as-is to preserve flow.
        raise gr.Warning(f'Language model call failed: {output["error"]}')
    logger.info(f'API RESPONSE\n{output[0]["generated_text"]}')
    # The endpoint echoes the prompt; keep only the assistant's portion.
    assistant_reply = output[0]["generated_text"].split('<|assistant|>')[1]
    match = re.search(code_pattern, assistant_reply)
    if not match:
        return assistant_reply, code, None
    new_code = match.group(1)
    # Bug fix: the original f-string had no braces and logged the literal
    # text "new_code" instead of the generated code.
    logger.info(f'NEW CODE:\n{new_code}')
    return assistant_reply, new_code, None
def transcribe(audio: str) -> (str, str):
    """Convert the recorded audio file at `audio` to text via the module-level
    Whisper pipeline. The second return value (None) resets the recorder."""
    return whisper_pipe(audio)["text"], None
def copy_notify(code):
    # Toast confirming the copy; the actual clipboard write happens in the
    # _js snippet attached to the button. `code` is received but unused here.
    gr.Info("App code snippet copied!")
def add_hotkeys() -> str:
    """Return the JavaScript source of hotkeys.js, injected at page load to
    wire up the ctrl+` / ctrl+space keyboard shortcuts."""
    with open("hotkeys.js") as script_file:
        return script_file.read()
# UI layout: two tabs (gradio-lite and stlite runtimes), each with the same
# three-step workflow — run the app in-browser, customize by voice/text, export.
# NOTE(review): original indentation was lost in extraction; the nesting below
# is reconstructed from the mirrored structure of the two tabs — verify against
# the rendered layout.
with gr.Blocks() as demo:
    gr.Markdown("<h1 id=\"TEST\" align=\"center\">KiteWind πͺπ</h1>")
    gr.Markdown(
        "<h4 align=\"center\">Chat-assisted web app creator by <a href=\"https://huggingface.co/gstaff\">@gstaff</a></h4>")
    # Tracks which demo runtime is active; updated by the tab select handlers.
    selectedTab = gr.State(value='gradio-lite')
    with gr.Tab('Gradio (gradio-lite)') as gradio_lite_tab:
        with gr.Row():
            with gr.Column():
                gr.Markdown("## 1. Run your app in the browser!")
                # gradio-lite mounts the live preview into this div via JS.
                gr.HTML(value='<div id="gradioDemoDiv"></div>')
        gr.Markdown("## 2. Customize using voice requests!")
        with gr.Row():
            with gr.Column():
                with gr.Group():
                    in_audio = gr.Audio(label="Record a voice request (click or press ctrl + ` to start/stop)", source='microphone', type='filepath', elem_classes=["record-btn"])
                    in_prompt = gr.Textbox(label="Or type a text request and press Enter",
                                           placeholder="Need an idea? Try one of these:\n- Add a button to reverse the name\n- Change the greeting to Spanish\n- Put the reversed name output into a separate textbox")
                    out_text = gr.TextArea(label="Chat Assistant Response")
                    clear = gr.ClearButton([in_prompt, in_audio, out_text])
            with gr.Column():
                code_area = gr.Code(label="App Code - You can also edit directly and then click Update App or ctrl + space",
                                    language='python', value=starting_app_code(DemoType.GRADIO))
                update_btn = gr.Button("Update App (Ctrl + Space)", variant="primary", elem_classes=["update-btn"])
                # Pure-JS event: pushes the edited code into the preview iframe.
                code_update_params = {'fn': None, 'inputs': code_area, 'outputs': None,
                                      '_js': update_iframe_js(DemoType.GRADIO)}
                gen_text_params = {'fn': generate_text, 'inputs': [code_area, in_prompt],
                                   'outputs': [out_text, code_area]}
                transcribe_params = {'fn': transcribe, 'inputs': [in_audio], 'outputs': [in_prompt, in_audio]}
                update_btn.click(**code_update_params)
                # Text request: generate new code, then refresh the preview.
                in_prompt.submit(**gen_text_params).then(**code_update_params)
                # Voice request: transcribe, then generate, then refresh.
                in_audio.stop_recording(**transcribe_params).then(**gen_text_params).then(**code_update_params)
        with gr.Row():
            with gr.Column():
                gr.Markdown("## 3. Export your app to share!")
                copy_snippet_btn = gr.Button("Copy app snippet to paste in another page")
                copy_snippet_btn.click(copy_notify, code_area, None, _js=copy_snippet_js(DemoType.GRADIO))
                download_btn = gr.Button("Download app as a standalone file")
                download_btn.click(None, code_area, None, _js=download_code_js(DemoType.GRADIO))
        with gr.Row():
            with gr.Column():
                gr.Markdown("## Current limitations")
                with gr.Accordion("Click to view", open=False):
                    gr.Markdown(
                        "- Only gradio-lite apps using the python standard libraries and gradio are supported\n- The chat hasn't been tuned on gradio library data; it may make mistakes\n- The app needs to fully reload each time it is changed")
    # Second tab mirrors the first, targeting the stlite (Streamlit) runtime.
    with gr.Tab('Streamlit (stlite)') as stlite_tab:
        with gr.Row():
            with gr.Column():
                gr.Markdown("## 1. Run your app in the browser!")
                # stlite mounts the live preview into this div via JS.
                gr.HTML(value='<div id="stliteDemoDiv"></div>')
        gr.Markdown("## 2. Customize using voice requests!")
        with gr.Row():
            with gr.Column():
                with gr.Group():
                    in_audio = gr.Audio(label="Record a voice request (click or press ctrl + ` to start/stop)", source='microphone', type='filepath', elem_classes=["record-btn"])
                    in_prompt = gr.Textbox(label="Or type a text request and press Enter",
                                           placeholder="Need an idea? Try one of these:\n- Add a button to reverse the name\n- Change the greeting to Spanish\n- Make the button primary")
                    out_text = gr.TextArea(label="Chat Assistant Response")
                    clear_btn = gr.ClearButton([in_prompt, in_audio, out_text])
            with gr.Column():
                code_area = gr.Code(label="App Code - You can also edit directly and then click Update App or ctrl + space",
                                    language='python', value=starting_app_code(DemoType.STREAMLIT))
                update_btn = gr.Button("Update App (Ctrl + Space)", variant="primary", elem_classes=["update-btn"])
                code_update_params = {'fn': None, 'inputs': code_area, 'outputs': None,
                                      '_js': update_iframe_js(DemoType.STREAMLIT)}
                gen_text_params = {'fn': generate_text, 'inputs': [code_area, in_prompt],
                                   'outputs': [out_text, code_area]}
                transcribe_params = {'fn': transcribe, 'inputs': [in_audio], 'outputs': [in_prompt, in_audio]}
                update_btn.click(**code_update_params)
                in_prompt.submit(**gen_text_params).then(**code_update_params)
                in_audio.stop_recording(**transcribe_params).then(**gen_text_params).then(**code_update_params)
        with gr.Row():
            with gr.Column():
                gr.Markdown("## 3. Export your app to share!")
                copy_snippet_btn = gr.Button("Copy app snippet to paste in another page")
                copy_snippet_btn.click(copy_notify, code_area, None, _js=copy_snippet_js(DemoType.STREAMLIT))
                download_btn = gr.Button("Download app as a standalone file")
                download_btn.click(None, code_area, None, _js=download_code_js(DemoType.STREAMLIT))
        with gr.Row():
            with gr.Column():
                gr.Markdown("## Current limitations")
                with gr.Accordion("Click to view", open=False):
                    gr.Markdown(
                        "- Only Streamlit apps using libraries available in pyodide are supported\n- The chat hasn't been tuned on Streamlit library data; it may make mistakes")
    # Switching tabs records the active runtime and re-runs its loader JS.
    gradio_lite_tab.select(lambda: "gradio-lite", None, selectedTab).then(None, None, None,
                                                                         _js=load_js(DemoType.GRADIO))
    stlite_tab.select(lambda: "stlite", None, selectedTab).then(None, None, None, _js=load_js(DemoType.STREAMLIT))
    # On page load: boot the default (gradio-lite) preview and bind hotkeys.
    demo.load(None, None, None, _js=load_js(DemoType.GRADIO))
    demo.load(None, None, None, _js=add_hotkeys())
demo.css = "footer {visibility: hidden}"

if __name__ == "__main__":
    demo.queue().launch()
|