yasserrmd committed on
Commit
ccf6147
·
verified ·
1 Parent(s): 5b91eaf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +20 -1
app.py CHANGED
@@ -6,6 +6,25 @@ import time # To simulate processing time if needed
6
  # Initialize the Hugging Face Inference Client
7
  client = InferenceClient()
8
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
  # Function to generate and format AI response
10
  def generate_response(prompt_template, **kwargs):
11
  # Simulate processing/loading
@@ -19,7 +38,7 @@ def generate_response(prompt_template, **kwargs):
19
  top_p=0.8
20
  )
21
  response_content = response.choices[0].message["content"]
22
- formatted_response = response_content.replace("[", "$$").replace("]", "$$")
23
  return gr.update(value=f"{formatted_response}")
24
 
25
  # Gradio app interface
 
6
  # Initialize the Hugging Face Inference Client
7
  client = InferenceClient()
8
 
9
+
10
+
11
def preprocess_latex(content):
    """Reformat model output so math renders with Markdown/KaTeX delimiters.

    Lines that look like whole equations (mentioning "Simplify", "Solve",
    "boxed", or "frac") are wrapped in display-math ``$$ ... $$`` markers;
    lines containing a parenthesis pair have every ``(``/``)`` swapped for
    inline ``$`` delimiters; all other lines pass through unchanged.

    Parameters:
        content: raw multi-line response text from the model.

    Returns:
        The text with math delimiters inserted, lines rejoined by newlines.
    """
    equation_markers = ("Simplify", "Solve", "boxed", "frac")
    rendered = []

    for raw_line in content.split("\n"):
        if any(marker in raw_line for marker in equation_markers):
            # Treat the whole line as a display equation.
            rendered.append(f"$$ {raw_line.strip()} $$")
        elif "(" in raw_line and ")" in raw_line:
            # NOTE(review): this converts *every* parenthesis to an inline
            # math delimiter, so prose like "(for example)" is mathified too.
            rendered.append(raw_line.replace("(", "$").replace(")", "$"))
        else:
            rendered.append(raw_line)

    return "\n".join(rendered)
27
+
28
  # Function to generate and format AI response
29
  def generate_response(prompt_template, **kwargs):
30
  # Simulate processing/loading
 
38
  top_p=0.8
39
  )
40
  response_content = response.choices[0].message["content"]
41
+ formatted_response = preprocess_latex(response_content)
42
  return gr.update(value=f"{formatted_response}")
43
 
44
  # Gradio app interface