acecalisto3 committed on
Commit
f1f508e
1 Parent(s): 6421388

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +75 -38
app.py CHANGED
@@ -1,12 +1,36 @@
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
 
 
3
 
4
- """
5
- For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
- """
7
  client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
 
 
 
 
 
9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
10
  def respond(
11
  message,
12
  history: list[tuple[str, str]],
@@ -23,41 +47,54 @@ def respond(
23
  if val[1]:
24
  messages.append({"role": "assistant", "content": val[1]})
25
 
26
- messages.append({"role": "user", "content": message})
27
-
28
- response = ""
29
-
30
- for message in client.chat_completion(
31
- messages,
32
- max_tokens=max_tokens,
33
- stream=True,
34
- temperature=temperature,
35
- top_p=top_p,
36
- ):
37
- token = message.choices[0].delta.content
38
-
39
- response += token
40
- yield response
41
-
42
- """
43
- For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
44
- """
45
- demo = gr.ChatInterface(
46
- respond,
47
- additional_inputs=[
48
- gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
49
- gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
50
- gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
51
- gr.Slider(
52
- minimum=0.1,
53
- maximum=1.0,
54
- value=0.95,
55
- step=0.05,
56
- label="Top-p (nucleus sampling)",
57
- ),
58
- ],
59
- )
60
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
61
 
62
  if __name__ == "__main__":
63
- demo.launch()
 
1
  import gradio as gr
2
  from huggingface_hub import InferenceClient
3
+ import os
4
+ import requests
5
 
6
# Hugging Face Inference Client used by respond() for streamed chat completion.
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
 
9
# GitHub API details.
# Generalized: the username/repository were hard-coded placeholders; they can
# now be overridden via environment variables, while the original placeholder
# values remain the defaults (backward compatible).
GITHUB_USERNAME = os.getenv("GITHUB_USERNAME", "YOUR_USERNAME")
GITHUB_REPOSITORY = os.getenv("GITHUB_REPOSITORY", "YOUR_REPOSITORY")
# Personal access token; may be None if the env var is unset — requests will
# then be sent unauthenticated and hit the lower rate limit.
GITHUB_API_TOKEN = os.getenv("GITHUB_API_TOKEN")
13
 
14
# Function to fetch GitHub issues
def fetch_github_issues():
    """Fetch the issues of the configured GitHub repository.

    Returns:
        list[dict]: issue objects as returned by the GitHub REST v3 API.

    Raises:
        RuntimeError: if the API responds with a non-200 status code
            (subclass of Exception, so existing ``except Exception`` callers
            still catch it).
        requests.RequestException: on network failure or timeout.
    """
    url = f"https://api.github.com/repos/{GITHUB_USERNAME}/{GITHUB_REPOSITORY}/issues"
    headers = {
        "Authorization": f"Bearer {GITHUB_API_TOKEN}",
        "Accept": "application/vnd.github.v3+json"
    }
    # Fix: the original call had no timeout, so a stalled connection could
    # hang the Gradio worker indefinitely.
    response = requests.get(url, headers=headers, timeout=30)
    if response.status_code == 200:
        return response.json()
    # Fix: raise a specific exception type instead of bare Exception.
    raise RuntimeError(f"Error fetching issues: {response.status_code}")
26
+
27
# Function to analyze issues and provide solutions
def analyze_issues(issue_text, model_name):
    """Generate a suggested resolution for a GitHub issue.

    Bug fix: the original body called ``pipeline(...)``, a name that is never
    imported anywhere in this file, so every call raised NameError. Rewritten
    on top of the already-imported huggingface_hub.InferenceClient so the
    generation runs server-side instead of loading a local model.

    Args:
        issue_text: issue title and body concatenated into one prompt.
        model_name: Hub model id to use for text generation (e.g. "gpt2").

    Returns:
        str: the generated resolution text (same type as the original's
        ``result[0]['generated_text']``).
    """
    generator = InferenceClient(model_name)
    return generator.text_generation(issue_text)
32
+
33
+ # Function to handle chat responses
34
  def respond(
35
  message,
36
  history: list[tuple[str, str]],
 
47
  if val[1]:
48
  messages.append({"role": "assistant", "content": val[1]})
49
 
50
+ if message.startswith("/github"):
51
+ try:
52
+ issues = fetch_github_issues()
53
+ issue_list = "\n".join([f"{i+1}. {issue['title']}" for i, issue in enumerate(issues)])
54
+ yield f"Available GitHub Issues:\n{issue_list}\n\nEnter the issue number to analyze:"
55
+ except Exception as e:
56
+ yield f"Error fetching GitHub issues: {e}"
57
+ elif message.isdigit():
58
+ try:
59
+ issue_number = int(message) - 1
60
+ issues = fetch_github_issues()
61
+ issue = issues[issue_number]
62
+ issue_text = issue['title'] + "\n\n" + issue['body']
63
+ resolution = analyze_issues(issue_text, "gpt2") # Default to gpt2 for now
64
+ yield f"Resolution for Issue '{issue['title']}':\n{resolution}"
65
+ except Exception as e:
66
+ yield f"Error analyzing issue: {e}"
67
+ else:
68
+ messages.append({"role": "user", "content": message})
69
+
70
+ response = ""
71
+ for message in client.chat_completion(
72
+ messages,
73
+ max_tokens=max_tokens,
74
+ stream=True,
75
+ temperature=temperature,
76
+ top_p=top_p,
77
+ ):
78
+ token = message.choices[0].delta.content
79
+ response += token
80
+ yield response
 
 
 
81
 
82
# Build the Gradio UI: a chat interface wired to respond(), with the
# generation controls exposed as additional inputs.
with gr.Blocks() as demo:
    chatbot = gr.ChatInterface(
        respond,
        additional_inputs=[
            gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
            gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
            gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
            gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
        ],
    )
98
 
99
# Script entry point: enable request queuing (required for the streaming
# generator in respond()) and serve on all interfaces at port 7860.
if __name__ == "__main__":
    demo.queue().launch(share=True, server_name="0.0.0.0", server_port=7860)