MiladMola committed on
Commit
c0eba71
1 Parent(s): dab4cfd

Initial app.py

Browse files
Files changed (1) hide show
  1. app.py +176 -0
app.py ADDED
@@ -0,0 +1,176 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import spaces
2
+ import json
3
+ import subprocess
4
+ from llama_cpp import Llama
5
+ from llama_cpp_agent import LlamaCppAgent
6
+ from llama_cpp_agent.providers import LlamaCppPythonProvider
7
+ from llama_cpp_agent.chat_history import BasicChatHistory
8
+ from llama_cpp_agent.chat_history.messages import Roles
9
+ import gradio as gr
10
+ from huggingface_hub import hf_hub_download
11
+
12
# Fetch the two quantized GGUF checkpoints of Dorna-Llama3-8B-Instruct
# into ./models so the chat handler can load them by filename.
hf_hub_download(
    repo_id="PartAI/Dorna-Llama3-8B-Instruct-GGUF",
    filename="dorna-llama3-8b-instruct.Q2_K.gguf",
    local_dir="./models",
)
hf_hub_download(
    repo_id="PartAI/Dorna-Llama3-8B-Instruct-GGUF",
    filename="dorna-llama3-8b-instruct.Q4_0.gguf",
    local_dir="./models",
)

# Dark-theme styling for the chat bubbles, passed to gr.ChatInterface below.
css = """
.message-row {
    justify-content: space-evenly !important;
}
.message-bubble-border {
    border-radius: 6px !important;
}
.dark.message-bubble-border {
    border-color: #343140 !important;
}
.dark.user {
    background: #1e1c26 !important;
}
.dark.assistant.dark, .dark.pending.dark {
    background: #16141c !important;
}
"""
40
+
41
def get_messages_formatter_type(model_name):
    """Return the prompt formatter to use for *model_name*.

    All models served by this app are driven with the ChatML template,
    so the argument is currently not inspected and CHATML is always
    returned; the parameter is kept so per-model dispatch can be added
    without changing callers.
    """
    # Imported here (not at module top) so the formatter enum is only
    # resolved when a chat actually starts.
    from llama_cpp_agent import MessagesFormatterType

    return MessagesFormatterType.CHATML
44
+
45
@spaces.GPU(duration=120)
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
    top_k,
    repeat_penalty,
    model,
):
    """Generate a streamed chat reply for *message*.

    Yields the accumulated response text after each new token so the
    Gradio chatbot can render it incrementally.

    Parameters mirror the `additional_inputs` of the ChatInterface:
    sampling knobs (max_tokens, temperature, top_p, top_k,
    repeat_penalty), the system prompt, and the GGUF *model* filename
    looked up under ./models.
    """
    formatter = get_messages_formatter_type(model)

    # NOTE(review): a fresh Llama instance is constructed on every call;
    # weights are reloaded per request inside the GPU-allocated window.
    llm = Llama(
        model_path=f"models/{model}",
        flash_attn=True,
        n_threads=40,
        n_gpu_layers=81,
        n_batch=1024,
        n_ctx=8192,
    )
    provider = LlamaCppPythonProvider(llm)

    agent = LlamaCppAgent(
        provider,
        system_prompt=f"{system_message}",
        predefined_messages_formatter_type=formatter,
        debug_output=True,
    )

    # Copy the UI sliders into the provider's sampling settings.
    sampling = provider.get_provider_default_settings()
    sampling.temperature = temperature
    sampling.top_k = top_k
    sampling.top_p = top_p
    sampling.max_tokens = max_tokens
    sampling.repeat_penalty = repeat_penalty
    sampling.stream = True

    # Replay previous (user, assistant) turns into the agent's history.
    chat_log = BasicChatHistory()
    for user_turn, assistant_turn in history:
        chat_log.add_message({'role': Roles.user, 'content': user_turn})
        chat_log.add_message({'role': Roles.assistant, 'content': assistant_turn})

    token_stream = agent.get_chat_response(
        message,
        llm_sampling_settings=sampling,
        chat_history=chat_log,
        returns_streaming_generator=True,
        print_output=False,
    )

    partial = ""
    for token in token_stream:
        partial += token
        yield partial
110
+
111
+ PLACEHOLDER = """
112
+ <div class="message-bubble-border" style="display:flex; max-width: 600px; border-radius: 8px; box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1); backdrop-filter: blur(10px);">
113
+ <div style="padding: .5rem 1.5rem;">
114
+ <h2 style="text-align: left; font-size: 1.5rem; font-weight: 700; margin-bottom: 0.5rem;">Chat with CausalLM 34B (8-bit GGUF)</h2>
115
+ <p style="text-align: left; font-size: 16px; line-height: 1.5; margin-bottom: 15px;">You can try different models from CausalLM here.<br>Running on NVIDIA A100-SXM4-80GB MIG 3g.40gb with Zero-GPU from Hugging Face.</p>
116
+ </div>
117
+ </div>
118
+ """
119
+
120
# Chat UI wiring: `respond` plus the sampling controls it receives as
# `additional_inputs` (order must match respond's parameters after
# message/history/system_message).
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a helpful assistant.", label="System message"),
        gr.Slider(minimum=1, maximum=8192, value=2048, step=1, label="Max tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(
            minimum=0.1,
            maximum=1.0,
            value=0.95,
            step=0.05,
            label="Top-p",
        ),
        gr.Slider(
            minimum=0,
            maximum=100,
            value=40,
            step=1,
            label="Top-k",
        ),
        gr.Slider(
            minimum=0.0,
            maximum=2.0,
            value=1,
            step=0.1,
            label="Repetition penalty",
        ),
        # BUG FIX: the dropdown previously offered CausalLM GGUF filenames
        # ('myCauslLM-34b-beta-v0.1-q8.gguf', '35b-beta-long-Q6_K.gguf') that
        # are never downloaded by this app, so every selection failed to load.
        # It now lists exactly the two Dorna quantizations fetched at startup.
        gr.Dropdown(
            [
                'dorna-llama3-8b-instruct.Q4_0.gguf',
                'dorna-llama3-8b-instruct.Q2_K.gguf',
            ],
            value="dorna-llama3-8b-instruct.Q4_0.gguf",
            label="Model",
        ),
    ],
    theme=gr.themes.Soft(primary_hue="violet", secondary_hue="violet", neutral_hue="gray", font=[gr.themes.GoogleFont("Exo"), "ui-sans-serif", "system-ui", "sans-serif"]).set(
        body_background_fill_dark="#16141c",
        block_background_fill_dark="#16141c",
        block_border_width="1px",
        block_title_background_fill_dark="#1e1c26",
        input_background_fill_dark="#292733",
        button_secondary_background_fill_dark="#24212b",
        border_color_primary_dark="#343140",
        background_fill_secondary_dark="#16141c",
        color_accent_soft_dark="transparent"
    ),
    css=css,
    retry_btn="Retry",
    undo_btn="Undo",
    clear_btn="Clear",
    submit_btn="Send",
    # Description updated to match the Dorna models this app actually serves.
    description="Chat with Dorna-Llama3-8B-Instruct (GGUF)",
    chatbot=gr.Chatbot(scale=1, placeholder=PLACEHOLDER)
)
174
+
175
+ if __name__ == "__main__":
176
+ demo.launch()