emeses committed on
Commit
6563121
·
1 Parent(s): b296482

Update space

Browse files
Files changed (1) hide show
  1. app.py +108 -38
app.py CHANGED
@@ -1,51 +1,121 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
 
 
 
3
 
4
- client = InferenceClient("meta-llama/Llama-3.2-3B-Instruct")
 
5
 
6
- def respond(message, history: list[tuple[str, str]], system_message, max_tokens, temperature, top_p):
7
- messages = [{"role": "system", "content": system_message}]
8
- for val in history:
9
- if val[0]:
10
- messages.append({"role": "user", "content": val[0]})
11
- if val[1]:
12
- messages.append({"role": "assistant", "content": val[1]})
13
- messages.append({"role": "user", "content": message})
14
- response = ""
15
- for message in client.chat_completion(
16
- messages,
17
- max_tokens=max_tokens,
18
- stream=True,
19
- temperature=temperature,
20
- top_p=top_p,
21
- ):
22
- token = message.choices[0].delta.content
23
- response += token
24
- yield response
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
25
 
26
- def upload_schedule(files):
27
- # Process the uploaded files here
28
- return f"Uploaded {len(files)} file(s)"
 
 
 
29
 
30
  with gr.Blocks() as demo:
31
  with gr.Row():
 
32
  with gr.Column(scale=1):
33
- gr.File(label="Upload Schedule", file_count="multiple", type="file", fn=upload_schedule)
34
- with gr.Column(scale=3):
35
- gr.ChatInterface(
 
 
 
 
 
 
 
 
 
 
 
 
 
36
  respond,
37
- additional_inputs=[
38
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
39
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
40
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
41
- gr.Slider(
42
- minimum=0.1,
43
- maximum=1.0,
44
- value=0.95,
45
- step=0.05,
46
- label="Top-p (nucleus sampling)",
47
- ),
48
- ],
49
  )
50
 
51
  if __name__ == "__main__":
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
+ import pandas as pd
4
+ import requests
5
+ from bs4 import BeautifulSoup
6
 
7
# Hugging Face Inference API client shared by respond() below.
client = InferenceClient("meta-llama/Llama-2-7b-chat-hf")
 
10
def respond(message, history, max_tokens=512, temperature=0.7, top_p=0.95):
    """Generate a chat reply for *message* given the prior conversation.

    Args:
        message: Latest user message from the chat box.
        history: List of (user_msg, assistant_msg) pairs from the Gradio UI.
        max_tokens: Maximum number of new tokens to generate.
        temperature: Sampling temperature forwarded to the model.
        top_p: Nucleus-sampling cutoff forwarded to the model.

    Returns:
        The assistant's full reply as a string, or an ``"Error: ..."``
        string if the API call fails.
    """
    try:
        # Rebuild the conversation for the API. Skip None/empty turns:
        # Gradio can hand over incomplete pairs (e.g. while a reply is
        # still pending), and sending {"content": None} breaks the API.
        messages = []
        for user_msg, assistant_msg in history:
            if user_msg:
                messages.append({"role": "user", "content": user_msg})
            if assistant_msg:
                messages.append({"role": "assistant", "content": assistant_msg})
        messages.append({"role": "user", "content": message})

        # Stream tokens and accumulate them into a single reply string.
        response = ""
        for chunk in client.chat_completion(
            messages,
            max_tokens=max_tokens,
            temperature=temperature,
            top_p=top_p,
            stream=True,
        ):
            # getattr with a default covers both a missing attribute and
            # a None content field in the streamed delta.
            token = getattr(chunk.choices[0].delta, "content", None)
            if token:
                response += token
        return response

    except Exception as e:
        # Surface failures in the chat window instead of crashing the UI.
        return f"Error: {str(e)}"
36
+
37
def extract_schedule(url):
    """Scrape the first HTML table at *url* and return it as styled HTML.

    Args:
        url: Page to fetch; expected to contain a schedule <table>.

    Returns:
        An HTML fragment (inline <style> plus the rendered table), or an
        error message wrapped in ``<p>`` tags on any failure.
    """
    try:
        # Fetch and parse the page. The timeout prevents the Gradio UI
        # from hanging forever on an unreachable or stalled host.
        response = requests.get(url, timeout=10)
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')

        # Find the first table and extract its data.
        table = soup.find('table')
        if not table:
            return "<p>No table found on page</p>"

        schedule_data = []
        rows = table.find_all('tr')

        for row in rows[1:]:  # Skip header row
            cells = row.find_all('td')
            if len(cells) >= 4:  # Only process rows with enough columns
                date = cells[0].text.strip()
                topic = cells[1].text.strip()

                # Skip empty rows and non-lecture entries.
                if date and topic and not topic.startswith('See Canvas'):
                    schedule_data.append({
                        # assumes the cell starts with YYYY-MM-DD — TODO confirm source format
                        'Date': date[:10],
                        'Topic': topic
                    })

        # Nothing matched: say so instead of rendering an empty table.
        if not schedule_data:
            return "<p>No schedule entries found</p>"

        df = pd.DataFrame(schedule_data)

        # Render with minimal inline CSS so gr.HTML shows a readable table.
        html = f"""
        <style>
            table {{ border-collapse: collapse; width: 100%; }}
            th, td {{
                border: 1px solid black;
                padding: 8px;
                text-align: left;
            }}
            th {{ background-color: #f2f2f2; }}
        </style>
        {df.to_html(index=False)}
        """
        return html

    except Exception as e:
        return f"<p>Error: {str(e)}</p>"
85
 
86
def display_schedule(url):
    """Return the schedule at *url* rendered as an HTML string.

    Thin wrapper around extract_schedule() used as the button callback;
    any unexpected failure is surfaced as plain text.
    """
    try:
        # extract_schedule already returns a ready-to-display HTML fragment.
        return extract_schedule(url)
    except Exception as e:
        return str(e)
92
 
93
# Two-pane layout: schedule extractor on the left, chatbot on the right.
with gr.Blocks() as demo:
    with gr.Row():
        # Left Column - Schedule
        with gr.Column(scale=1):
            url_input = gr.Textbox(
                value="https://id2223kth.github.io/schedule/",
                label="Schedule URL"
            )
            schedule_output = gr.HTML(label="Extracted Schedule")
            extract_btn = gr.Button("Extract Schedule")

            # Wire the button to the scraper: URL in, HTML table out.
            extract_btn.click(
                fn=display_schedule,
                inputs=[url_input],
                outputs=[schedule_output]
            )

        # Right Column - Chatbot
        with gr.Column(scale=2):
            chatbot = gr.ChatInterface(
                respond,
            )
120
 
121
  if __name__ == "__main__":