freddyaboulton committed
Commit d6cc295
1 Parent(s): e201c60

Update app.py

Files changed (1):
  1. app.py +37 -3
app.py CHANGED
@@ -4,6 +4,8 @@ from gradio_webrtc import WebRTC, ReplyOnPause, AdditionalOutputs
 import numpy as np
 import os
 from twilio.rest import Client
+import base64
+
 
 account_sid = os.environ.get("TWILIO_ACCOUNT_SID")
 auth_token = os.environ.get("TWILIO_AUTH_TOKEN")
@@ -33,6 +35,34 @@ whisper = pipeline(
 system_prompt = "You are an AI coding assistant. Your task is to write single-file HTML applications based on a user's request. You may also be asked to edit your original response. Only return the code needed to fulfill the request."
 user_prompt = "Please write a single-file HTML application to fulfill the following request. Only return the necessary code. Include all necessary imports and styles.\nThe message:{user_message}\nCurrent code you have written:{code}"
 
+def extract_html_content(text):
+    """
+    Extract content including HTML tags.
+    """
+    try:
+        start_tag = "<html>"
+        end_tag = "</html>"
+
+        # Find positions of start and end tags
+        start_pos = text.find(start_tag)
+        end_pos = text.find(end_tag)
+
+        # Check if both tags exist and are in correct order
+        if start_pos == -1 or end_pos == -1 or start_pos > end_pos:
+            return None
+
+        # Extract content including tags
+        return text[start_pos:end_pos + len(end_tag)]
+
+    except Exception as e:
+        print(f"Error processing string: {e}")
+        return None
+
+
+def display_in_sandbox(code):
+    encoded_html = base64.b64encode(code.encode('utf-8')).decode('utf-8')
+    data_uri = f"data:text/html;charset=utf-8;base64,{encoded_html}"
+    return f"<iframe src=\"{data_uri}\" width=\"100%\" height=\"920px\"></iframe>"
 
 
 def generate(user_message: tuple[int, np.ndarray],
@@ -48,17 +78,20 @@ def generate(user_message: tuple[int, np.ndarray],
     history.append({"role": "user", "content": user_msg_formatted})
     input_text = tokenizer.apply_chat_template(history, tokenize=False)
     inputs = tokenizer.encode(input_text, return_tensors="pt").to(device)
-    outputs = model.generate(inputs, max_new_tokens=500, temperature=0.2, top_p=0.9, do_sample=True)
+    outputs = model.generate(inputs, max_new_tokens=2048, temperature=0.2, top_p=0.9, do_sample=True)
     response = tokenizer.decode(outputs[0])
+    print("response", response)
+
     output = response[response.rindex("<|im_start|>assistant\n") + len("<|im_start|>assistant\n"):]
+    html_code = extract_html_content(output)
     history.append({"role": "assistant", "content": output})
-    yield AdditionalOutputs(history, output)
+    yield AdditionalOutputs(history, html_code)
 
 
 with gr.Blocks() as demo:
     history = gr.State([{"role": "system", "content": system_prompt}])
     with gr.Row():
-        code = gr.Code(language="html")
+        code = gr.Code(language="html", interactive=False)
         sandbox = gr.HTML("")
     with gr.Row():
         webrtc = WebRTC(rtc_configuration=rtc_configuration, mode="send", modality="audio")
@@ -67,6 +100,7 @@ with gr.Blocks() as demo:
                   outputs=[webrtc], time_limit=90)
     webrtc.on_additional_outputs(lambda history, code: (history, code),
                                  outputs=[history, code])
+    code.change(display_in_sandbox, code, html, queue=False)
 
 if __name__ == "__main__":
     demo.launch()
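
For reference, a minimal standalone sketch (not part of the commit) of the two helpers added above, lightly condensed so their behavior can be checked without the model, Twilio credentials, or the Gradio app; the try/except is dropped because str.find does not raise, and the sample response string is made up for illustration:

import base64


def extract_html_content(text):
    # Return the substring from the first "<html>" through "</html>"
    # (tags included), or None if either tag is missing or out of order.
    start_tag, end_tag = "<html>", "</html>"
    start_pos = text.find(start_tag)
    end_pos = text.find(end_tag)
    if start_pos == -1 or end_pos == -1 or start_pos > end_pos:
        return None
    return text[start_pos:end_pos + len(end_tag)]


def display_in_sandbox(code):
    # Base64-encode the generated page and wrap it in an <iframe> whose src
    # is a data: URI, so it can be rendered inside a gr.HTML component.
    encoded_html = base64.b64encode(code.encode("utf-8")).decode("utf-8")
    data_uri = f"data:text/html;charset=utf-8;base64,{encoded_html}"
    return f'<iframe src="{data_uri}" width="100%" height="920px"></iframe>'


# Simulated model output with chatter around the HTML document.
response = "Sure, here you go:\n<html><body><h1>Hello</h1></body></html>\nAnything else?"
html_code = extract_html_content(response)
print(html_code)                      # -> <html><body><h1>Hello</h1></body></html>
print(display_in_sandbox(html_code))  # -> <iframe src="data:text/html;charset=utf-8;base64,..."></iframe>

Serving the markup through a base64 data: URI avoids escaping quotes or angle brackets when the generated HTML is embedded in the iframe's src attribute, which is why the commit wires code.change to run the editor's contents through display_in_sandbox before handing them to the HTML preview.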