Futuresony commited on
Commit
f0e607c
·
verified ·
1 Parent(s): ce7ff30

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +110 -0
app.py ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from huggingface_hub import InferenceClient
3
+ from textblob import TextBlob
4
+ import json
5
+ import os
6
+
7
# Hugging Face Inference API client used for all text generation.
# NOTE(review): the repo id ends in ".gguf" — presumably a GGUF checkpoint
# repo; confirm the Inference API actually serves it.
client = InferenceClient("Futuresony/future_ai_12_10_2024.gguf")

# Directory to store interactions and feedback
DATA_DIR = "data"
# JSON file holding the append-only list of saved interaction records.
INTERACTIONS_FILE = os.path.join(DATA_DIR, "interactions.json")

# Ensure the data directory exists
os.makedirs(DATA_DIR, exist_ok=True)
15
+
16
def format_alpaca_prompt(user_input, system_prompt, history):
    """Build an Alpaca/LLaMA-style prompt from the system prompt, prior turns, and the new message.

    Each (instruction, response) pair in *history* is rendered as an
    "### Instruction:" / "### Response:" section, followed by the new
    instruction and an open "### Response:" marker for the model to complete.
    """
    turns = []
    for past_input, past_response in history:
        turns.append(f"### Instruction:\n{past_input}\n### Response:\n{past_response}")
    history_block = "\n".join(turns)

    return (
        f"{system_prompt}\n"
        f"{history_block}\n"
        "\n"
        "### Instruction:\n"
        f"{user_input}\n"
        "\n"
        "### Response:\n"
    )
28
+
29
def analyze_sentiment(message):
    """Return the TextBlob polarity of *message* (-1.0 = negative … 1.0 = positive)."""
    return TextBlob(message).sentiment.polarity
34
+
35
def save_interaction(user_input, chatbot_response=None, feedback=None):
    """Append one interaction record to INTERACTIONS_FILE.

    Args:
        user_input: The user's message (or "" when only feedback is recorded).
        chatbot_response: The model's reply; defaults to None so feedback-only
            records can be saved without a response.
        feedback: Optional "Good"/"Bad" rating supplied by the user.

    BUG FIX: the timestamp was the hard-coded constant "2025-02-25 04:00:30",
    so every record carried the same fake time; it is now taken at call time.
    """
    # Local import keeps this fix self-contained within the function.
    from datetime import datetime

    interaction = {
        "user_input": user_input,
        "chatbot_response": chatbot_response,
        "feedback": feedback,
        "timestamp": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
    }

    # Load the existing record list if present; a missing or corrupt file
    # starts a fresh list rather than losing the new record to an exception.
    interactions = []
    if os.path.exists(INTERACTIONS_FILE):
        try:
            with open(INTERACTIONS_FILE, "r") as file:
                interactions = json.load(file)
        except (json.JSONDecodeError, OSError):
            interactions = []

    interactions.append(interaction)

    with open(INTERACTIONS_FILE, "w") as file:
        json.dump(interactions, file, indent=4)
53
+
54
def respond(message, history, system_message, max_tokens, temperature, top_p, feedback=None):
    """Generate one chatbot reply and yield it.

    Runs sentiment analysis on the incoming message to pick a persona
    (this overrides the UI-supplied *system_message*), builds an Alpaca-style
    prompt from the chat history, queries the inference client, strips the
    prompt echo, records the turn, and yields only the answer text.
    """
    polarity = analyze_sentiment(message)

    # Choose the persona from the message's sentiment; note this always
    # replaces whatever system message the UI passed in.
    if polarity < -0.2:
        system_message = "You are a sympathetic Chatbot."
    elif polarity > 0.2:
        system_message = "You are an enthusiastic Chatbot."
    else:
        system_message = "You are a friendly Chatbot."

    prompt = format_alpaca_prompt(message, system_message, history)

    raw = client.text_generation(
        prompt,
        max_new_tokens=max_tokens,
        temperature=temperature,
        top_p=top_p,
    )

    # Keep only the text after the final "### Response:" marker.
    answer = raw.split("### Response:")[-1].strip()

    # Record the turn in the caller's history list and persist it (with any
    # feedback) to disk.
    history.append((message, answer))
    save_interaction(message, answer, feedback)

    yield answer
82
+
83
def collect_feedback(response, feedback):
    """Persist a user's rating of a chatbot response and confirm it.

    Args:
        response: The chatbot response text being rated.
        feedback: The rating chosen in the UI ("Good" or "Bad").

    Returns:
        A short confirmation string for the interface's text output.

    BUG FIX: the original called save_interaction(response, feedback=feedback),
    which passed the response as *user_input* and omitted the required
    *chatbot_response* positional argument, raising TypeError on every
    feedback submission. The response now goes into the chatbot_response slot.
    """
    save_interaction("", response, feedback=feedback)
    # The gr.Interface wiring declares outputs="text", so return something visible.
    return "Feedback recorded."
86
+
87
# Stand-alone UI for rating a chatbot response: the user pastes/enters the
# response text, picks "Good" or "Bad", and the rating is saved via
# collect_feedback. Declared here; launched from the __main__ guard below.
feedback_interface = gr.Interface(
    fn=collect_feedback,
    inputs=[
        gr.Textbox(label="Response"),
        gr.Radio(choices=["Good", "Bad"], label="Feedback"),
    ],
    outputs="text",
    title="Feedback Interface"
)
96
+
97
# Main chat UI. ChatInterface passes (message, history, *additional_inputs)
# to respond(), so the widgets below map onto system_message, max_tokens,
# temperature, top_p, and feedback in that order.
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
        gr.Slider(minimum=1, maximum=250, value=128, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.9, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.99, step=0.01, label="Top-p (nucleus sampling)"),
        # BUG FIX: gr.Radio has no `optional` parameter in Gradio 3/4 (and
        # gr.ChatInterface requires >= 3.39), so optional=True raised TypeError
        # at startup. An unselected Radio already yields None, which respond()
        # accepts via its feedback=None default.
        gr.Radio(choices=["Good", "Bad"], label="Feedback"),
    ],
)
107
+
108
+ if __name__ == "__main__":
109
+ demo.launch()
110
+ feedback_interface.launch()