from huggingface_hub import InferenceClient
import time

# Hosted text-generation endpoint; swap the model id to change backends.
client = InferenceClient("mistralai/Mixtral-8x7B-Instruct-v0.1")
# client = InferenceClient("meta-llama/Llama-2-70b-chat-hf")

def split_list(lst, chunk_size):
  return [lst[i:i + chunk_size] for i in range(0, len(lst), chunk_size)]
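
# Quick illustration (made-up messages): split_list regroups a flat transcript
# of alternating user/assistant strings into two-item turns, e.g.
#   split_list(["hi", "hello!", "how are you?", "fine"], 2)
#   -> [["hi", "hello!"], ["how are you?", "fine"]]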

def format_prompt(message, history, system_prompt):
  # Llama-2 style instruct format: an optional <<SYS>> system block, then each
  # past turn rendered as "[INST] user [/INST] assistant</s>".
  prompt = f"<s>[INST] <<SYS>>{system_prompt}<</SYS>> [/INST] </s>" if system_prompt else "<s>"

  # history is a list of [user, assistant] pairs (see split_list), so unpack
  # both halves of each turn rather than interpolating the pair as one string.
  for user_prompt, bot_response in history:
    prompt += f"[INST] {user_prompt} [/INST] {bot_response}</s>"

  prompt += f"[INST] {message} [/INST]"

  return prompt
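
# For one prior turn and no system prompt, format_prompt yields (example
# messages are invented):
#   "<s>[INST] hi [/INST] hello!</s>[INST] how are you? [/INST]"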

def generate(
  prompt, system_prompt, history, shouldoverridehistory, historyoverride,
  max_new_tokens=1024, temperature=1.2, top_p=0.95, repetition_penalty=1.0,
):
  # Coerce slider values to floats and keep temperature strictly positive,
  # since sampling with do_sample=True requires a nonzero temperature.
  temperature = float(temperature)
  if temperature < 1e-2:
    temperature = 1e-2
  top_p = float(top_p)

  generate_kwargs = dict(
    temperature=temperature,
    max_new_tokens=max_new_tokens,
    top_p=top_p,
    repetition_penalty=repetition_penalty,
    do_sample=True,
    seed=round(time.time()),  # vary the seed per call so repeated prompts differ
  )

  if shouldoverridehistory:
    # historyoverride arrives as a single flat list of alternating
    # user/assistant messages; regroup it into [user, assistant] pairs.
    history = split_list(historyoverride[0], 2)

  formatted_prompt = format_prompt(prompt, history, system_prompt)
  print(formatted_prompt)

  # With stream=False and details=True this returns a single
  # TextGenerationOutput (generated_text plus token details), not a stream.
  response = client.text_generation(
    formatted_prompt, **generate_kwargs, stream=False, details=True, return_full_text=False
  )

  return response
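

# Minimal usage sketch, not part of the original app: the argument values are
# illustrative, and the call goes out to the hosted inference endpoint.
if __name__ == "__main__":
  output = generate(
    prompt="Summarize what an InferenceClient does.",
    system_prompt="You are a concise assistant.",
    history=[],
    shouldoverridehistory=False,
    historyoverride=None,
    max_new_tokens=256,
  )
  # details=True means the return value is a TextGenerationOutput object.
  print(output.generated_text)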