Nick088 and cappuch committed
Commit 2bcefc7
1 Parent(s): 706064a

- ugh (e9731f7d53fed363fa12924e7bfc45e14f850afe)


Co-authored-by: Mikus <cappuch@users.noreply.huggingface.co>

Files changed (1)
  1. app.py +25 -9
app.py CHANGED

@@ -3,16 +3,33 @@ import random
 import gradio as gr
 from groq import Groq
 
+client = Groq(
+    api_key = os.environ.get("Groq_Api_Key")
+)
+
+
 def generate_response(prompt, history, model, temperature, max_tokens, top_p, seed):
-    client = Groq(
-        api_key = os.environ.get("Groq_Api_Key")
-    )
+    messages = []
+    for i, data in enumerate(history):
+        if i % 2 == 0:
+            role = 'user'
+        else:
+            role = 'assistant'
+        message = {}
+        message["role"] = role
+        message["content"] = data
+        messages.append(message)
+
+    message = {}
+    message["role"] = "user"
+    message["content"] = prompt
+    messages.append(message)
 
     if seed == 0:
         seed = random.randint(1, 100000)
 
     stream = client.chat.completions.create(
-        messages = history + [prompt],
+        messages=messages,
         model=model,
         temperature=temperature,
         max_tokens=max_tokens,
@@ -31,10 +48,9 @@ def generate_response(prompt, history, model, temperature, max_tokens, top_p, seed):
 
     return response
 
-# Define the Gradio chat interface
 additional_inputs = [
-    gr.Dropdown(choices=["llama3-70b-8192", "llama3-8b-8192", "mixtral-8x7b-32768", "llama2-70b-4096", "gemma-7b-it"], value="llama3-70b-8192", label="LLM Model"),
-    gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.5, label="Temperature", info="Controls randomness of responses"),
+    gr.Dropdown(choices=["llama3-70b-8192", "llama3-8b-8192", "mixtral-8x7b-32768", "llama2-70b-4096", "gemma-7b-it"], value="llama3-70b-8192", label="Model"),
+    gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.5, label="Temperature", info="Controls diversity of the generated text. Lower is more deterministic, higher is more creative."),
     gr.Slider(minimum=1, maximum=4096, step=1, value=4096, label="Max Tokens", info="The maximum number of tokens that the model can process in a single response"),
     gr.Slider(minimum=0.0, maximum=1.0, step=0.01, value=0.5, label="Top P", info="A method of text generation where a model will only consider the most probable next tokens that make up the probability p."),
     gr.Number(precision=0, value=42, label="Seed", info="A starting point to initiate generation, use 0 for random")
@@ -44,6 +60,6 @@ gr.ChatInterface(
     fn=generate_response,
     chatbot=gr.Chatbot(show_label=False, show_share_button=False, show_copy_button=True, likeable=True, layout="panel"),
     additional_inputs=additional_inputs,
-    title="Groq API LLMs AI Models",
-    description="Using https://groq.com/ api, ofc as its free it will have some limitations of requests per minute, so its better if you duplicate this space with your own api key<br>Hugging Face Space by [Nick088](https://linktr.ee/Nick088)",
+    title="Groq API UI",
+    description="Inference by Groq. Hugging Face Space by [Nick088](https://linktr.ee/Nick088)",
 ).launch()
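
Note: the body of the streaming loop (old lines 16-30 / new lines 36-47 of app.py) falls outside the hunks shown above. For context, below is a minimal sketch of how such a stream is commonly consumed with the Groq Python SDK. The helper name stream_completion and the chunk-handling loop are assumptions for illustration, not the Space's actual code; the create() parameters mirror the ones visible in the diff.

    import os
    import random

    from groq import Groq

    # Same env var name as in the committed app.py
    client = Groq(api_key=os.environ.get("Groq_Api_Key"))

    def stream_completion(messages, model="llama3-70b-8192", temperature=0.5,
                          max_tokens=4096, top_p=0.5, seed=42):
        # Mirror the app's seed handling: 0 means "pick a random seed".
        if seed == 0:
            seed = random.randint(1, 100000)

        # stream=True returns an iterator of chunks in the
        # OpenAI-compatible format used by the Groq SDK.
        stream = client.chat.completions.create(
            messages=messages,
            model=model,
            temperature=temperature,
            max_tokens=max_tokens,
            top_p=top_p,
            seed=seed,
            stream=True,
        )

        # Accumulate the streamed deltas into one response string,
        # matching the function's single `return response` in the diff.
        response = ""
        for chunk in stream:
            delta = chunk.choices[0].delta.content
            if delta:  # the final chunk's delta can be None
                response += delta
        return response

    # Example usage:
    # print(stream_completion([{"role": "user", "content": "Hello!"}]))

Accumulating and returning the full string (rather than yielding partial text) matches the committed generate_response, which returns response once the stream is exhausted.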