Severian committed on
Commit 1795131
1 Parent(s): e51a745

Create app.py

Files changed (1)
app.py +240 -0
app.py ADDED
@@ -0,0 +1,240 @@
+ import gradio as gr
+ from huggingface_hub import snapshot_download
+ from llama_cpp import Llama
+
+ # System prompt text
+ SYSTEM_PROMPT = (
+     "Your name is ANIMA, an Advanced Nature Inspired Multidisciplinary Assistant, "
+     "and a leading expert in biomimicry, biology, engineering, industrial design, "
+     "environmental science, physiology, and paleontology. You were instructed to "
+     "understand, learn from, and emulate the strategies used by living things to help "
+     "users create sustainable designs and technologies.\n\n"
+     "Your goal is to help the user work step by step through the Biomimicry Design "
+     "Process to propose biomimetic solutions to a challenge.\n\n"
+     "Use the questions listed below as a guide to help you reflect on your work:\n"
+     "• How does context play a role?\n"
+     "• Are the strategies operating at the same or different scales (nano, micro, macro, meso)?\n"
+     "• Are there repeating shapes, forms, or textures?\n"
+     "• What behaviors or processes are occurring?\n"
+     "• What relationships are at play?\n"
+     "• Does information play a role? How does it flow?\n"
+     "• How do your strategies relate to the different systems they are part of?\n\n"
+     "Consider each of your abstracted design strategies in relation to the original design "
+     "question or problem you identified in the Define step. Ask, “How can this strategy inform "
+     "our design solution?” Write down all of your ideas and then analyze them.\n\n"
+     "Think about how the strategies and design concepts you are working with relate to nature's "
+     "unifying patterns. What is their role in the larger system? How can you use a systems view "
+     "to get to a deeper level of emulation or a more life-friendly solution?\n\n"
+     "Nature's Unifying Patterns:\n"
+     "Nature uses only the energy it needs and relies on freely available energy.\n"
+     "Nature recycles all materials.\n"
+     "Nature is resilient to disturbances.\n"
+     "Nature tends to optimize rather than maximize.\n"
+     "Nature provides mutual benefits.\n"
+     "Nature runs on information.\n"
+     "Nature uses chemistry and materials that are safe for living beings.\n"
+     "Nature builds using abundant resources, incorporating rare resources only sparingly.\n"
+     "Nature is locally attuned and responsive.\n"
+     "Nature uses shape to determine functionality."
+ )
+
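+ # Special token ids for the model's chat format: role markers for system,
+ # user, and bot turns, plus the newline token that separates the role id
+ # from the message text.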
+ SYSTEM_TOKEN = 1587
+ USER_TOKEN = 2188
+ BOT_TOKEN = 12435
+ LINEBREAK_TOKEN = 13
+
+ ROLE_TOKENS = {
+     "user": USER_TOKEN,
+     "bot": BOT_TOKEN,
+     "system": SYSTEM_TOKEN,
+ }
+
+
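+ # Encode one chat turn as [BOS, role_token, LINEBREAK, ...message tokens, EOS].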
+ def get_message_tokens(model, role, content):
+     message_tokens = model.tokenize(content.encode("utf-8"))
+     message_tokens.insert(1, ROLE_TOKENS[role])
+     message_tokens.insert(2, LINEBREAK_TOKEN)
+     message_tokens.append(model.token_eos())
+     return message_tokens
+
+
+ def get_system_tokens(model):
+     system_message = {"role": "system", "content": SYSTEM_PROMPT}
+     return get_message_tokens(model, **system_message)
+
+
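+ # Download the GGUF weights from the Hub into the working directory and
+ # load them with llama.cpp.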
+ repo_name = "Severian/ANIMA-Phi-Neptune-Mistral-7B-gguf"
+ model_name = "ANIMA-Phi-Neptune-Mistral-7B-gguf"
+
+ snapshot_download(repo_id=repo_name, local_dir=".", allow_patterns=model_name)
+
+ model = Llama(
+     model_path=model_name,
+     n_ctx=2000,
+     n_parts=1,
+ )
+
+ max_new_tokens = 1500
+
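+ # Gradio callback: append the new user message to the history and clear the input box.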
+ def user(message, history):
+     new_history = history + [[message, None]]
+     return "", new_history
+
+
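+ # Gradio callback: rebuild the full prompt (system message plus chat history),
+ # then stream tokens from the model, updating the last history entry as text arrives.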
+ def bot(
+     history,
+     system_prompt,  # shown read-only in the UI; generation always uses SYSTEM_PROMPT
+     top_p,
+     top_k,
+     temp
+ ):
+     tokens = get_system_tokens(model)[:]
+     tokens.append(LINEBREAK_TOKEN)
+
+     for user_message, bot_message in history[:-1]:
+         message_tokens = get_message_tokens(model=model, role="user", content=user_message)
+         tokens.extend(message_tokens)
+         if bot_message:
+             message_tokens = get_message_tokens(model=model, role="bot", content=bot_message)
+             tokens.extend(message_tokens)
+
+     last_user_message = history[-1][0]
+     message_tokens = get_message_tokens(model=model, role="user", content=last_user_message)
+     tokens.extend(message_tokens)
+
+     role_tokens = [model.token_bos(), BOT_TOKEN, LINEBREAK_TOKEN]
+     tokens.extend(role_tokens)
+     generator = model.generate(
+         tokens,
+         top_k=top_k,
+         top_p=top_p,
+         temp=temp
+     )
+
+     partial_text = ""
+     for i, token in enumerate(generator):
+         if token == model.token_eos() or (max_new_tokens is not None and i >= max_new_tokens):
+             break
+         partial_text += model.detokenize([token]).decode("utf-8", "ignore")
+         history[-1][1] = partial_text
+         yield history
+
+
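+ # Build the Gradio UI: the system prompt (read-only), the chat window,
+ # sampling controls, and the message box with Submit/Stop/Clear buttons.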
+ with gr.Blocks(
+     theme=gr.themes.Soft()
+ ) as demo:
+     favicon = '<img src="https://cdn.midjourney.com/b88e5beb-6324-4820-8504-a1a37a9ba36d/0_1.png" width="48px" style="display: inline">'
+     gr.Markdown(
+         f"""<h1><center>{favicon}ANIMA</center></h1>
+
+         """
+     )
+     with gr.Row():
+         with gr.Column(scale=5):
+             system_prompt = gr.Textbox(label="System Prompt", placeholder="", value=SYSTEM_PROMPT, interactive=False)
+             chatbot = gr.Chatbot(label="Dialogue").style(height=400)
+         with gr.Column(min_width=80, scale=1):
+             with gr.Tab(label="Generation Parameters"):
+                 top_p = gr.Slider(
+                     minimum=0.0,
+                     maximum=1.0,
+                     value=0.9,
+                     step=0.05,
+                     interactive=True,
+                     label="Top-p",
+                 )
+                 top_k = gr.Slider(
+                     minimum=10,
+                     maximum=100,
+                     value=30,
+                     step=5,
+                     interactive=True,
+                     label="Top-k",
+                 )
+                 temp = gr.Slider(
+                     minimum=0.0,
+                     maximum=2.0,
+                     value=0.01,
+                     step=0.01,
+                     interactive=True,
+                     label="Temperature",
+                 )
+     with gr.Row():
+         with gr.Column():
+             msg = gr.Textbox(
+                 label="Send Message",
+                 placeholder="Send Message",
+                 show_label=False,
+             ).style(container=False)
+         with gr.Column():
+             with gr.Row():
+                 submit = gr.Button("Submit")
+                 stop = gr.Button("Stop")
+                 clear = gr.Button("Clear")
+     with gr.Row():
+         gr.Markdown(
+             """WARNING: The model may generate factually or ethically incorrect text. We are not responsible for this."""
+         )
+
+
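+     # Event wiring: each submit path first runs `user` (append the message to
+     # the history), then `bot` (stream the reply); Stop cancels both chains.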
+     # Pressing Enter
+     submit_event = msg.submit(
+         fn=user,
+         inputs=[msg, chatbot],
+         outputs=[msg, chatbot],
+         queue=False,
+     ).success(
+         fn=bot,
+         inputs=[
+             chatbot,
+             system_prompt,
+             top_p,
+             top_k,
+             temp
+         ],
+         outputs=chatbot,
+         queue=True,
+     )
+
+     # Pressing the button
+     submit_click_event = submit.click(
+         fn=user,
+         inputs=[msg, chatbot],
+         outputs=[msg, chatbot],
+         queue=False,
+     ).success(
+         fn=bot,
+         inputs=[
+             chatbot,
+             system_prompt,
+             top_p,
+             top_k,
+             temp
+         ],
+         outputs=chatbot,
+         queue=True,
+     )
+
+     # Stop generation
+     stop.click(
+         fn=None,
+         inputs=None,
+         outputs=None,
+         cancels=[submit_event, submit_click_event],
+         queue=False,
+     )
+
+     # Clear history
+     clear.click(lambda: None, None, chatbot, queue=False)
+
+ demo.queue(max_size=128, concurrency_count=1)
+ demo.launch()